Package gluon :: Module dal
[hide private]
[frames] | [no frames]

Source Code for Module gluon.dal

    1  #!/bin/env python 
    2  # -*- coding: utf-8 -*- 
    3   
    4  """ 
    5  This file is part of the web2py Web Framework 
    6  Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu> 
    7  License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html) 
    8   
    9  Thanks to 
   10      * Niall Sweeny <niall.sweeny@fonjax.com> for MS SQL support 
   11      * Marcel Leuthi <mluethi@mlsystems.ch> for Oracle support 
   12      * Denes 
   13      * Chris Clark 
   14      * clach05 
   15      * Denes Lengyel 
   16      * and many others who have contributed to current and previous versions 
   17   
   18  This file contains the DAL support for many relational databases, 
   19  including: 
   20  - SQLite & SpatiaLite 
   21  - MySQL 
   22  - Postgres 
   23  - Firebird 
   24  - Oracle 
   25  - MS SQL 
   26  - DB2 
   27  - Interbase 
   28  - Ingres 
   29  - Informix (9+ and SE) 
   30  - SapDB (experimental) 
   31  - Cubrid (experimental) 
   32  - CouchDB (experimental) 
   33  - MongoDB (in progress) 
   34  - Google:nosql 
   35  - Google:sql 
   36  - Teradata 
   37  - IMAP (experimental) 
   38   
   39  Example of usage: 
   40   
   41  >>> # from dal import DAL, Field 
   42   
   43  ### create DAL connection (and create DB if it doesn't exist) 
   44  >>> db = DAL(('sqlite://storage.sqlite','mysql://a:b@localhost/x'), 
   45  ... folder=None) 
   46   
   47  ### define a table 'person' (create/alter as necessary) 
   48  >>> person = db.define_table('person',Field('name','string')) 
   49   
   50  ### insert a record 
   51  >>> id = person.insert(name='James') 
   52   
   53  ### retrieve it by id 
   54  >>> james = person(id) 
   55   
   56  ### retrieve it by name 
   57  >>> james = person(name='James') 
   58   
   59  ### retrieve it by arbitrary query 
   60  >>> query = (person.name=='James') & (person.name.startswith('J')) 
   61  >>> james = db(query).select(person.ALL)[0] 
   62   
   63  ### update one record 
   64  >>> james.update_record(name='Jim') 
   65  <Row {'id': 1, 'name': 'Jim'}> 
   66   
   67  ### update multiple records by query 
   68  >>> db(person.name.like('J%')).update(name='James') 
   69  1 
   70   
   71  ### delete records by query 
   72  >>> db(person.name.lower() == 'jim').delete() 
   73  0 
   74   
   75  ### retrieve multiple records (rows) 
   76  >>> people = db(person).select(orderby=person.name, 
   77  ... groupby=person.name, limitby=(0,100)) 
   78   
   79  ### further filter them 
   80  >>> james = people.find(lambda row: row.name == 'James').first() 
   81  >>> print james.id, james.name 
   82  1 James 
   83   
   84  ### check aggregates 
   85  >>> counter = person.id.count() 
   86  >>> print db(person).select(counter).first()(counter) 
   87  1 
   88   
   89  ### delete one record 
   90  >>> james.delete_record() 
   91  1 
   92   
   93  ### delete (drop) entire database table 
   94  >>> person.drop() 
   95   
   96  Supported field types: 
   97  id string text boolean integer double decimal password upload 
   98  blob time date datetime 
   99   
  100  Supported DAL URI strings: 
  101  'sqlite://test.db' 
  102  'spatialite://test.db' 
  103  'sqlite:memory' 
  104  'spatialite:memory' 
  105  'jdbc:sqlite://test.db' 
  106  'mysql://root:none@localhost/test' 
  107  'postgres://mdipierro:password@localhost/test' 
  108  'postgres:psycopg2://mdipierro:password@localhost/test' 
  109  'postgres:pg8000://mdipierro:password@localhost/test' 
  110  'jdbc:postgres://mdipierro:none@localhost/test' 
  111  'mssql://web2py:none@A64X2/web2py_test' 
  112  'mssql2://web2py:none@A64X2/web2py_test' # alternate mappings 
  113  'oracle://username:password@database' 
  114  'firebird://user:password@server:3050/database' 
  115  'db2://DSN=dsn;UID=user;PWD=pass' 
  116  'firebird://username:password@hostname/database' 
  117  'firebird_embedded://username:password@c://path' 
  118  'informix://user:password@server:3050/database' 
  119  'informixu://user:password@server:3050/database' # unicode informix 
  120  'ingres://database'  # or use an ODBC connection string, e.g. 'ingres://dsn=dsn_name' 
  121  'google:datastore' # for google app engine datastore 
  122  'google:sql' # for google app engine with sql (mysql compatible) 
  123  'teradata://DSN=dsn;UID=user;PWD=pass; DATABASE=database' # experimental 
  124  'imap://user:password@server:port' # experimental 
  125  'mongodb://user:password@server:port/database' # experimental 
  126   
  127  For more info: 
  128  help(DAL) 
  129  help(Field) 
  130  """ 
  131   
  132  ################################################################################### 
  133  # this file only exposes DAL and Field 
  134  ################################################################################### 
  135   
  136  __all__ = ['DAL', 'Field'] 
  137   
  138  DEFAULTLENGTH = {'string':512, 
  139                   'password':512, 
  140                   'upload':512, 
  141                   'text':2**15, 
  142                   'blob':2**31} 
  143  TIMINGSSIZE = 100 
  144  SPATIALLIBS = { 
  145      'Windows':'libspatialite', 
  146      'Linux':'libspatialite.so', 
  147      'Darwin':'libspatialite.dylib' 
  148      } 
  149  DEFAULT_URI = 'sqlite://dummy.db' 
  150   
  151  import re 
  152  import sys 
  153  import locale 
  154  import os 
  155  import types 
  156  import datetime 
  157  import threading 
  158  import time 
  159  import csv 
  160  import cgi 
  161  import copy 
  162  import socket 
  163  import logging 
  164  import base64 
  165  import shutil 
  166  import marshal 
  167  import decimal 
  168  import struct 
  169  import urllib 
  170  import hashlib 
  171  import uuid 
  172  import glob 
  173  import traceback 
  174  import platform 
  175   
  176  PYTHON_VERSION = sys.version_info[0] 
  177  if PYTHON_VERSION == 2: 
  178      import cPickle as pickle 
  179      import cStringIO as StringIO 
  180      import copy_reg as copyreg 
  181      hashlib_md5 = hashlib.md5 
  182      bytes, unicode = str, unicode 
  183  else: 
  184      import pickle 
  185      from io import StringIO as StringIO 
  186      import copyreg 
  187      long = int 
  188      hashlib_md5 = lambda s: hashlib.md5(bytes(s,'utf8')) 
  189      bytes, unicode = bytes, str 
  190   
  191  CALLABLETYPES = (types.LambdaType, types.FunctionType, 
  192                   types.BuiltinFunctionType, 
  193                   types.MethodType, types.BuiltinMethodType) 
  194   
  195  TABLE_ARGS = set( 
  196      ('migrate','primarykey','fake_migrate','format','redefine', 
  197       'singular','plural','trigger_name','sequence_name','fields', 
  198       'common_filter','polymodel','table_class','on_define','actual_name')) 
  199   
  200  SELECT_ARGS = set( 
  201      ('orderby', 'groupby', 'limitby','required', 'cache', 'left', 
  202       'distinct', 'having', 'join','for_update', 'processor','cacheable', 'orderby_on_limitby')) 
  203   
  204  ogetattr = object.__getattribute__ 
  205  osetattr = object.__setattr__ 
  206  exists = os.path.exists 
  207  pjoin = os.path.join 
  208   
  209  ################################################################################### 
  210  # following checks allow the use of dal without web2py, as a standalone module 
  211  ################################################################################### 
  212  try: 
  213      from gluon.utils import web2py_uuid 
  214  except (ImportError, SystemError): 
  215      import uuid 
def web2py_uuid():
    """Fallback UUID generator used when gluon.utils is not importable.

    Returns a random (version 4) UUID as its canonical 36-character
    string representation.
    """
    return '%s' % uuid.uuid4()
217 218 try: 219 import portalocker 220 have_portalocker = True 221 except ImportError: 222 have_portalocker = False 223 224 try: 225 from gluon import serializers 226 have_serializers = True 227 except ImportError: 228 have_serializers = False 229 try: 230 import json as simplejson 231 except ImportError: 232 try: 233 import gluon.contrib.simplejson as simplejson 234 except ImportError: 235 simplejson = None 236 237 LOGGER = logging.getLogger("web2py.dal") 238 DEFAULT = lambda:0 239 240 GLOBAL_LOCKER = threading.RLock() 241 THREAD_LOCAL = threading.local() 242 243 # internal representation of tables with field 244 # <table>.<field>, tables and fields may only be [a-zA-Z0-9_] 245 246 REGEX_TYPE = re.compile('^([\w\_\:]+)') 247 REGEX_DBNAME = re.compile('^(\w+)(\:\w+)*') 248 REGEX_W = re.compile('^\w+$') 249 REGEX_TABLE_DOT_FIELD = re.compile('^(\w+)\.(\w+)$') 250 REGEX_UPLOAD_PATTERN = re.compile('(?P<table>[\w\-]+)\.(?P<field>[\w\-]+)\.(?P<uuidkey>[\w\-]+)(\.(?P<name>\w+))?\.\w+$') 251 REGEX_CLEANUP_FN = re.compile('[\'"\s;]+') 252 REGEX_UNPACK = re.compile('(?<!\|)\|(?!\|)') 253 REGEX_PYTHON_KEYWORDS = re.compile('^(and|del|from|not|while|as|elif|global|or|with|assert|else|if|pass|yield|break|except|import|print|class|exec|in|raise|continue|finally|is|return|def|for|lambda|try)$') 254 REGEX_SELECT_AS_PARSER = re.compile("\s+AS\s+(\S+)") 255 REGEX_CONST_STRING = re.compile('(\"[^\"]*?\")|(\'[^\']*?\')') 256 REGEX_SEARCH_PATTERN = re.compile('^{[^\.]+\.[^\.]+(\.(lt|gt|le|ge|eq|ne|contains|startswith|year|month|day|hour|minute|second))?(\.not)?}$') 257 REGEX_SQUARE_BRACKETS = re.compile('^.+\[.+\]$') 258 REGEX_STORE_PATTERN = re.compile('\.(?P<e>\w{1,5})$') 259 REGEX_QUOTES = re.compile("'[^']*'") 260 REGEX_ALPHANUMERIC = re.compile('^[0-9a-zA-Z]\w*$') 261 REGEX_PASSWORD = re.compile('\://([^:@]*)\:') 262 REGEX_NOPASSWD = re.compile('\/\/[\w\.\-]+[\:\/](.+)(?=@)') # was '(?<=[\:\/])([^:@/]+)(?=@.+)' 263 264 # list of drivers will be built on the fly 265 # and lists 
only what is available 266 DRIVERS = [] 267 268 try: 269 from new import classobj 270 from google.appengine.ext import db as gae 271 from google.appengine.api import namespace_manager, rdbms 272 from google.appengine.api.datastore_types import Key ### for belongs on ID 273 from google.appengine.ext.db.polymodel import PolyModel 274 DRIVERS.append('google') 275 except ImportError: 276 pass 277 278 if not 'google' in DRIVERS: 279 280 try: 281 from pysqlite2 import dbapi2 as sqlite2 282 DRIVERS.append('SQLite(sqlite2)') 283 except ImportError: 284 LOGGER.debug('no SQLite drivers pysqlite2.dbapi2') 285 286 try: 287 from sqlite3 import dbapi2 as sqlite3 288 DRIVERS.append('SQLite(sqlite3)') 289 except ImportError: 290 LOGGER.debug('no SQLite drivers sqlite3') 291 292 try: 293 # first try contrib driver, then from site-packages (if installed) 294 try: 295 import gluon.contrib.pymysql as pymysql 296 # monkeypatch pymysql because they havent fixed the bug: 297 # https://github.com/petehunt/PyMySQL/issues/86 298 pymysql.ESCAPE_REGEX = re.compile("'") 299 pymysql.ESCAPE_MAP = {"'": "''"} 300 # end monkeypatch 301 except ImportError: 302 import pymysql 303 DRIVERS.append('MySQL(pymysql)') 304 except ImportError: 305 LOGGER.debug('no MySQL driver pymysql') 306 307 try: 308 import MySQLdb 309 DRIVERS.append('MySQL(MySQLdb)') 310 except ImportError: 311 LOGGER.debug('no MySQL driver MySQLDB') 312 313 314 try: 315 import psycopg2 316 from psycopg2.extensions import adapt as psycopg2_adapt 317 DRIVERS.append('PostgreSQL(psycopg2)') 318 except ImportError: 319 LOGGER.debug('no PostgreSQL driver psycopg2') 320 321 try: 322 # first try contrib driver, then from site-packages (if installed) 323 try: 324 import gluon.contrib.pg8000.dbapi as pg8000 325 except ImportError: 326 import pg8000.dbapi as pg8000 327 DRIVERS.append('PostgreSQL(pg8000)') 328 except ImportError: 329 LOGGER.debug('no PostgreSQL driver pg8000') 330 331 try: 332 import cx_Oracle 333 
DRIVERS.append('Oracle(cx_Oracle)') 334 except ImportError: 335 LOGGER.debug('no Oracle driver cx_Oracle') 336 337 try: 338 try: 339 import pyodbc 340 except ImportError: 341 try: 342 import gluon.contrib.pypyodbc as pyodbc 343 except Exception, e: 344 raise ImportError(str(e)) 345 DRIVERS.append('MSSQL(pyodbc)') 346 DRIVERS.append('DB2(pyodbc)') 347 DRIVERS.append('Teradata(pyodbc)') 348 DRIVERS.append('Ingres(pyodbc)') 349 except ImportError: 350 LOGGER.debug('no MSSQL/DB2/Teradata/Ingres driver pyodbc') 351 352 try: 353 import Sybase 354 DRIVERS.append('Sybase(Sybase)') 355 except ImportError: 356 LOGGER.debug('no Sybase driver') 357 358 try: 359 import kinterbasdb 360 DRIVERS.append('Interbase(kinterbasdb)') 361 DRIVERS.append('Firebird(kinterbasdb)') 362 except ImportError: 363 LOGGER.debug('no Firebird/Interbase driver kinterbasdb') 364 365 try: 366 import fdb 367 DRIVERS.append('Firebird(fdb)') 368 except ImportError: 369 LOGGER.debug('no Firebird driver fdb') 370 ##### 371 try: 372 import firebirdsql 373 DRIVERS.append('Firebird(firebirdsql)') 374 except ImportError: 375 LOGGER.debug('no Firebird driver firebirdsql') 376 377 try: 378 import informixdb 379 DRIVERS.append('Informix(informixdb)') 380 LOGGER.warning('Informix support is experimental') 381 except ImportError: 382 LOGGER.debug('no Informix driver informixdb') 383 384 try: 385 import sapdb 386 DRIVERS.append('SQL(sapdb)') 387 LOGGER.warning('SAPDB support is experimental') 388 except ImportError: 389 LOGGER.debug('no SAP driver sapdb') 390 391 try: 392 import cubriddb 393 DRIVERS.append('Cubrid(cubriddb)') 394 LOGGER.warning('Cubrid support is experimental') 395 except ImportError: 396 LOGGER.debug('no Cubrid driver cubriddb') 397 398 try: 399 from com.ziclix.python.sql import zxJDBC 400 import java.sql 401 # Try sqlite jdbc driver from http://www.zentus.com/sqlitejdbc/ 402 from org.sqlite import JDBC # required by java.sql; ensure we have it 403 zxJDBC_sqlite = java.sql.DriverManager 404 
DRIVERS.append('PostgreSQL(zxJDBC)') 405 DRIVERS.append('SQLite(zxJDBC)') 406 LOGGER.warning('zxJDBC support is experimental') 407 is_jdbc = True 408 except ImportError: 409 LOGGER.debug('no SQLite/PostgreSQL driver zxJDBC') 410 is_jdbc = False 411 412 try: 413 import couchdb 414 DRIVERS.append('CouchDB(couchdb)') 415 except ImportError: 416 LOGGER.debug('no Couchdb driver couchdb') 417 418 try: 419 import pymongo 420 DRIVERS.append('MongoDB(pymongo)') 421 except: 422 LOGGER.debug('no MongoDB driver pymongo') 423 424 try: 425 import imaplib 426 DRIVERS.append('IMAP(imaplib)') 427 except: 428 LOGGER.debug('no IMAP driver imaplib') 429 430 PLURALIZE_RULES = [ 431 (re.compile('child$'), re.compile('child$'), 'children'), 432 (re.compile('oot$'), re.compile('oot$'), 'eet'), 433 (re.compile('ooth$'), re.compile('ooth$'), 'eeth'), 434 (re.compile('l[eo]af$'), re.compile('l([eo])af$'), 'l\\1aves'), 435 (re.compile('sis$'), re.compile('sis$'), 'ses'), 436 (re.compile('man$'), re.compile('man$'), 'men'), 437 (re.compile('ife$'), re.compile('ife$'), 'ives'), 438 (re.compile('eau$'), re.compile('eau$'), 'eaux'), 439 (re.compile('lf$'), re.compile('lf$'), 'lves'), 440 (re.compile('[sxz]$'), re.compile('$'), 'es'), 441 (re.compile('[^aeioudgkprt]h$'), re.compile('$'), 'es'), 442 (re.compile('(qu|[^aeiou])y$'), re.compile('y$'), 'ies'), 443 (re.compile('$'), re.compile('$'), 's'), 444 ]
def pluralize(singular, rules=PLURALIZE_RULES):
    """Return the plural of *singular* using the first matching rule.

    Each rule is a (search_regex, sub_regex, replacement) triple; the
    default rule set ends with a catch-all that appends 's'.
    """
    for (matcher, substituter, replacement) in rules:
        if not matcher.search(singular):
            continue
        plural_form = substituter.sub(replacement, singular)
        if plural_form:
            return plural_form
def hide_password(uri):
    """Mask the password portion of a DAL URI with asterisks.

    Accepts a single URI string or a list/tuple of them (processed
    recursively).
    """
    if not isinstance(uri, (list, tuple)):
        return REGEX_NOPASSWD.sub('******', uri)
    return [hide_password(single_uri) for single_uri in uri]
def OR(a,b):
    """Combine two query expressions with the ``|`` (OR) operator."""
    combined = a | b
    return combined
def AND(a,b):
    """Combine two query expressions with the ``&`` (AND) operator."""
    combined = a & b
    return combined
def IDENTITY(x):
    """Return the argument unchanged (default no-op credential decoder)."""
    return x
def varquote_aux(name, quotestr='%s'):
    """Return *name* unchanged when it is a plain ``\\w+`` identifier;
    otherwise wrap it using the *quotestr* template."""
    if REGEX_W.match(name):
        return name
    return quotestr % name
def quote_keyword(a, keyword='timestamp'):
    """Return *a* with any dotted ``.<keyword>`` suffix rewritten as
    ``."<keyword>"``.

    Used to double-quote SQL reserved words (e.g. ``timestamp``) when
    they appear as field names in ``table.field`` expressions.
    """
    # Bug fix: the original pattern was the literal text '\.keyword(?=\w)',
    # so the *keyword* argument was never interpolated and the default
    # 'timestamp' was never actually quoted.  The \b ensures we do not
    # match longer identifiers that merely start with the keyword.
    regex = re.compile(r'\.%s\b' % re.escape(keyword))
    a = regex.sub('."%s"' % keyword, a)
    return a
472 473 if 'google' in DRIVERS: 474 475 is_jdbc = False
class GAEDecimalProperty(gae.Property):
    """
    GAE decimal implementation

    Stores decimal.Decimal values as strings in the datastore and
    quantizes them to *scale* decimal places when reading them back.
    """
    data_type = decimal.Decimal

    def __init__(self, precision, scale, **kwargs):
        # Bug fix: the original called super().__init__(self, **kwargs),
        # passing the instance as an extra positional argument (which
        # gae.Property would interpret as verbose_name).
        super(GAEDecimalProperty, self).__init__(**kwargs)
        # quantization template, e.g. scale=2 -> Decimal('1.00')
        # NOTE(review): *precision* is accepted but unused, as in the
        # original signature -- kept for interface compatibility.
        self.round = decimal.Decimal('1.' + '0' * scale)

    def get_value_for_datastore(self, model_instance):
        """Serialize the Decimal to a string; None/'' become None."""
        value = super(GAEDecimalProperty, self)\
            .get_value_for_datastore(model_instance)
        if value is None or value == '':
            return None
        else:
            return str(value)

    def make_value_from_datastore(self, value):
        """Deserialize the stored string back into a quantized Decimal."""
        if value is None or value == '':
            return None
        else:
            return decimal.Decimal(value).quantize(self.round)

    def validate(self, value):
        """Accept None, Decimal, or a string convertible to Decimal."""
        value = super(GAEDecimalProperty, self).validate(value)
        if value is None or isinstance(value, decimal.Decimal):
            return value
        elif isinstance(value, basestring):
            return decimal.Decimal(value)
        raise gae.BadValueError("Property %s must be a Decimal or string."\
                                % self.name)
513 ################################################################################### 514 # class that handles connection pooling (all adapters are derived from this one) 515 ################################################################################### 516 517 -class ConnectionPool(object):
518 519 POOLS = {} 520 check_active_connection = True 521 522 @staticmethod
523 - def set_folder(folder):
525 526 # ## this allows gluon to commit/rollback all dbs in this thread 527
    def close(self, action='commit', really=True):
        """Finalize and release this adapter's connection.

        ``action`` may be a callable (invoked with the adapter) or the
        name of a connection method such as 'commit' or 'rollback'; a
        falsy action skips that step.  When pooling is enabled and the
        pool has room, the connection is recycled instead of closed.
        """
        if action:
            if callable(action):
                action(self)
            else:
                getattr(self, action)()
        # ## if you want pools, recycle this connection
        if self.pool_size:
            GLOBAL_LOCKER.acquire()
            pool = ConnectionPool.POOLS[self.uri]
            if len(pool) < self.pool_size:
                pool.append(self.connection)
                really = False  # connection recycled: do not really close it
            GLOBAL_LOCKER.release()
        if really:
            self.close_connection()
        self.connection = None
545 546 @staticmethod
547 - def close_all_instances(action):
548 """ to close cleanly databases in a multithreaded environment """ 549 dbs = getattr(THREAD_LOCAL,'db_instances',{}).items() 550 for db_uid, db_group in dbs: 551 for db in db_group: 552 if hasattr(db,'_adapter'): 553 db._adapter.close(action) 554 getattr(THREAD_LOCAL,'db_instances',{}).clear() 555 getattr(THREAD_LOCAL,'db_instances_zombie',{}).clear() 556 if callable(action): 557 action(None) 558 return
559
    def find_or_make_work_folder(self):
        """ this actually does not make the folder. it has to be there """
        self.folder = getattr(THREAD_LOCAL, 'folder', '')

        # relativize the path for database-stored migration files under cwd
        if (os.path.isabs(self.folder) and
            isinstance(self, UseDatabaseStoredFile) and
            self.folder.startswith(os.getcwd())):
            self.folder = os.path.relpath(self.folder, os.getcwd())

        # Creating the folder if it does not exist
        # NOTE(review): deliberately disabled via 'if False and ...' --
        # the folder is expected to already exist; confirm intent before
        # re-enabling.
        if False and self.folder and not exists(self.folder):
            os.mkdir(self.folder)
573 - def after_connection_hook(self):
574 """hook for the after_connection parameter""" 575 if callable(self._after_connection): 576 self._after_connection(self) 577 self.after_connection()
578
579 - def after_connection(self):
580 """ this it is supposed to be overloaded by adapters""" 581 pass
582
    def reconnect(self, f=None, cursor=True):
        """
        this function defines: self.connection and self.cursor
        (iff cursor is True)
        if self.pool_size>0 it will try pull the connection from the pool
        if the connection is not active (closed by db server) it will loop
        if not self.pool_size or no active connections in pool makes a new one
        """
        if getattr(self, 'connection', None) != None:
            return  # already connected
        if f is None:
            f = self.connector

        # if not hasattr(self, "driver") or self.driver is None:
        #     LOGGER.debug("Skipping connection since there's no driver")
        #     return

        if not self.pool_size:
            # pooling disabled: always open a fresh connection
            self.connection = f()
            self.cursor = cursor and self.connection.cursor()
        else:
            uri = self.uri
            POOLS = ConnectionPool.POOLS
            while True:
                GLOBAL_LOCKER.acquire()
                if not uri in POOLS:
                    POOLS[uri] = []
                if POOLS[uri]:
                    # reuse a pooled connection, but verify it is alive
                    self.connection = POOLS[uri].pop()
                    GLOBAL_LOCKER.release()
                    self.cursor = cursor and self.connection.cursor()
                    try:
                        if self.cursor and self.check_active_connection:
                            self.execute('SELECT 1;')
                        break
                    except:
                        pass  # stale connection: discard it and loop again
                else:
                    # pool empty: create a brand-new connection
                    GLOBAL_LOCKER.release()
                    self.connection = f()
                    self.cursor = cursor and self.connection.cursor()
                    break
        self.after_connection_hook()
627 628 ################################################################################### 629 # this is a generic adapter that does nothing; all others are derived from this one 630 ################################################################################### 631 632 -class BaseAdapter(ConnectionPool):
633 native_json = False 634 driver = None 635 driver_name = None 636 drivers = () # list of drivers from which to pick 637 connection = None 638 commit_on_alter_table = False 639 support_distributed_transaction = False 640 uploads_in_blob = False 641 can_select_for_update = True 642 dbpath = None 643 folder = None 644 645 TRUE = 'T' 646 FALSE = 'F' 647 T_SEP = ' ' 648 QUOTE_TEMPLATE = '"%s"' 649 650 types = { 651 'boolean': 'CHAR(1)', 652 'string': 'CHAR(%(length)s)', 653 'text': 'TEXT', 654 'json': 'TEXT', 655 'password': 'CHAR(%(length)s)', 656 'blob': 'BLOB', 657 'upload': 'CHAR(%(length)s)', 658 'integer': 'INTEGER', 659 'bigint': 'INTEGER', 660 'float':'DOUBLE', 661 'double': 'DOUBLE', 662 'decimal': 'DOUBLE', 663 'date': 'DATE', 664 'time': 'TIME', 665 'datetime': 'TIMESTAMP', 666 'id': 'INTEGER PRIMARY KEY AUTOINCREMENT', 667 'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 668 'list:integer': 'TEXT', 669 'list:string': 'TEXT', 670 'list:reference': 'TEXT', 671 # the two below are only used when DAL(...bigint_id=True) and replace 'id','reference' 672 'big-id': 'BIGINT PRIMARY KEY AUTOINCREMENT', 673 'big-reference': 'BIGINT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 674 } 675
676 - def isOperationalError(self,exception):
677 if not hasattr(self.driver, "OperationalError"): 678 return None 679 return isinstance(exception, self.driver.OperationalError)
680
681 - def isProgrammingError(self,exception):
682 if not hasattr(self.driver, "ProgrammingError"): 683 return None 684 return isinstance(exception, self.driver.ProgrammingError)
685
686 - def id_query(self, table):
687 pkeys = getattr(table,'_primarykey',None) 688 if pkeys: 689 return table[pkeys[0]] != None 690 else: 691 return table._id != None
692
693 - def adapt(self, obj):
694 return "'%s'" % obj.replace("'", "''")
695
696 - def smart_adapt(self, obj):
697 if isinstance(obj,(int,float)): 698 return str(obj) 699 return self.adapt(str(obj))
700
701 - def file_exists(self, filename):
702 """ 703 to be used ONLY for files that on GAE may not be on filesystem 704 """ 705 return exists(filename)
706
707 - def file_open(self, filename, mode='rb', lock=True):
708 """ 709 to be used ONLY for files that on GAE may not be on filesystem 710 """ 711 if have_portalocker and lock: 712 fileobj = portalocker.LockedFile(filename,mode) 713 else: 714 fileobj = open(filename,mode) 715 return fileobj
716
717 - def file_close(self, fileobj):
718 """ 719 to be used ONLY for files that on GAE may not be on filesystem 720 """ 721 if fileobj: 722 fileobj.close()
723
724 - def file_delete(self, filename):
725 os.unlink(filename)
726
    def find_driver(self, adapter_args, uri=None):
        """Pick and bind the DB-API driver module for this adapter.

        Resolution order: an explicit ``<engine>:<driver>`` component of
        the URI, then ``adapter_args['driver']``, then the first driver
        from ``self.drivers`` whose module-level import succeeded.
        Raises RuntimeError when the requested (or any) driver is
        unavailable.
        """
        self.adapter_args = adapter_args
        if getattr(self, 'driver', None) != None:
            return  # a driver is already bound
        # a driver is importable iff its name landed in module globals()
        drivers_available = [driver for driver in self.drivers
                             if driver in globals()]
        if uri:
            items = uri.split('://', 1)[0].split(':')
            request_driver = items[1] if len(items) > 1 else None
        else:
            request_driver = None
        request_driver = request_driver or adapter_args.get('driver')
        if request_driver:
            if request_driver in drivers_available:
                self.driver_name = request_driver
                self.driver = globals().get(request_driver)
            else:
                raise RuntimeError("driver %s not available" % request_driver)
        elif drivers_available:
            self.driver_name = drivers_available[0]
            self.driver = globals().get(self.driver_name)
        else:
            raise RuntimeError("no driver available %s" % str(self.drivers))
    def log(self, message, table=None):
        """ Logs migrations

        It will not log changes if logfile is not specified. Defaults
        to sql.log
        """

        isabs = None
        logfilename = self.adapter_args.get('logfile', 'sql.log')
        writelog = bool(logfilename)
        if writelog:
            isabs = os.path.isabs(logfilename)

        # log only for tables with a migration file (_dbt) and only when
        # a working folder is known
        if table and table._dbt and writelog and self.folder:
            if isabs:
                table._loggername = logfilename
            else:
                table._loggername = pjoin(self.folder, logfilename)
            logfile = self.file_open(table._loggername, 'a')
            logfile.write(message)
            self.file_close(logfile)
    def __init__(self, db, uri, pool_size=0, folder=None, db_codec='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        """Base adapter constructor.

        Records the connection parameters and installs a do-nothing
        Dummy connection/cursor pair so the DAL can operate without a
        real database (the 'None' engine).  ``credential_decoder``,
        ``driver_args``, ``adapter_args`` and ``do_connect`` are unused
        here; they are part of the signature shared with subclasses.
        """
        self.db = db
        self.dbengine = "None"
        self.uri = uri
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        class Dummy(object):
            # stand-in connection/cursor: any method call returns []
            lastrowid = 1
            def __getattr__(self, value):
                return lambda *a, **b: []
        self.connection = Dummy()
        self.cursor = Dummy()
791 - def sequence_name(self,tablename):
792 return '%s_sequence' % tablename
793
794 - def trigger_name(self,tablename):
795 return '%s_sequence' % tablename
796
797 - def varquote(self,name):
798 return name
799
    def create_table(self, table,
                     migrate=True,
                     fake_migrate=False,
                     polymodel=None):
        """Generate (and optionally execute) the CREATE TABLE SQL for *table*.

        Returns the SQL query string.  When *migrate* is enabled, the
        schema metadata is pickled to a ``.table`` file; if that file
        already exists and differs, migrate_table() is invoked instead
        of creating the table.  *fake_migrate* records the migration
        without executing SQL.
        """
        db = table._db
        fields = []
        # PostGIS geo fields are added after the table has been created
        postcreation_fields = []
        sql_fields = {}        # metadata used to detect migrations
        sql_fields_aux = {}    # metadata used for the actual CREATE TABLE
        TFK = {}               # table-level foreign keys (multicolumn PKs)
        tablename = table._tablename
        sortable = 0
        types = self.types
        for field in table:
            sortable += 1
            field_name = field.name
            field_type = field.type
            if isinstance(field_type, SQLCustomType):
                ftype = field_type.native or field_type.type
            elif field_type.startswith('reference'):
                # 'reference <table>[.<field>]'; '.' means self-reference
                referenced = field_type[10:].strip()
                if referenced == '.':
                    referenced = tablename
                constraint_name = self.constraint_name(tablename, field_name)
                if not '.' in referenced \
                        and referenced != tablename \
                        and hasattr(table, '_primarykey'):
                    ftype = types['integer']
                else:
                    if hasattr(table, '_primarykey'):
                        rtablename, rfieldname = referenced.split('.')
                        rtable = db[rtablename]
                        rfield = rtable[rfieldname]
                        # must be PK reference or unique
                        if rfieldname in rtable._primarykey or \
                                rfield.unique:
                            ftype = types[rfield.type[:9]] % \
                                dict(length=rfield.length)
                            # multicolumn primary key reference?
                            if not rfield.unique and len(rtable._primarykey) > 1:
                                # then it has to be a table level FK
                                if rtablename not in TFK:
                                    TFK[rtablename] = {}
                                TFK[rtablename][rfieldname] = field_name
                            else:
                                ftype = ftype + \
                                    types['reference FK'] % dict(
                                        constraint_name=constraint_name,  # should be quoted
                                        foreign_key='%s (%s)' % (rtablename,
                                                                 rfieldname),
                                        table_name=tablename,
                                        field_name=field_name,
                                        on_delete_action=field.ondelete)
                    else:
                        # make a guess here for circular references
                        if referenced in db:
                            id_fieldname = db[referenced]._id.name
                        elif referenced == tablename:
                            id_fieldname = table._id.name
                        else:  # make a guess
                            id_fieldname = 'id'
                        ftype = types[field_type[:9]] % dict(
                            index_name=field_name + '__idx',
                            field_name=field_name,
                            constraint_name=constraint_name,
                            foreign_key='%s (%s)' % (referenced,
                                                     id_fieldname),
                            on_delete_action=field.ondelete)
            elif field_type.startswith('list:reference'):
                ftype = types[field_type[:14]]
            elif field_type.startswith('decimal'):
                precision, scale = map(int, field_type[8:-1].split(','))
                ftype = types[field_type[:7]] % \
                    dict(precision=precision, scale=scale)
            elif field_type.startswith('geo'):
                if not hasattr(self, 'srid'):
                    raise RuntimeError('Adapter does not support geometry')
                srid = self.srid
                geotype, parms = field_type[:-1].split('(')
                if not geotype in types:
                    raise SyntaxError(
                        'Field: unknown field type: %s for %s' \
                        % (field_type, field_name))
                ftype = types[geotype]
                if self.dbengine == 'postgres' and geotype == 'geometry':
                    # parameters: schema, srid, dimension
                    dimension = 2  # GIS.dimension ???
                    parms = parms.split(',')
                    if len(parms) == 3:
                        schema, srid, dimension = parms
                    elif len(parms) == 2:
                        schema, srid = parms
                    else:
                        schema = parms[0]
                    ftype = "SELECT AddGeometryColumn ('%%(schema)s', '%%(tablename)s', '%%(fieldname)s', %%(srid)s, '%s', %%(dimension)s);" % types[geotype]
                    ftype = ftype % dict(schema=schema,
                                         tablename=tablename,
                                         fieldname=field_name, srid=srid,
                                         dimension=dimension)
                    postcreation_fields.append(ftype)
            elif not field_type in types:
                raise SyntaxError('Field: unknown field type: %s for %s' % \
                    (field_type, field_name))
            else:
                ftype = types[field_type] \
                    % dict(length=field.length)
            if not field_type.startswith('id') and \
                    not field_type.startswith('reference'):
                if field.notnull:
                    ftype += ' NOT NULL'
                else:
                    ftype += self.ALLOW_NULL()
                if field.unique:
                    ftype += ' UNIQUE'
                if field.custom_qualifier:
                    ftype += ' %s' % field.custom_qualifier

            # add to list of fields
            sql_fields[field_name] = dict(
                length=field.length,
                unique=field.unique,
                notnull=field.notnull,
                sortable=sortable,
                type=str(field_type),
                sql=ftype)

            if field.notnull and not field.default is None:
                # Caveat: sql_fields and sql_fields_aux
                # differ for default values.
                # sql_fields is used to trigger migrations and sql_fields_aux
                # is used for create tables.
                # The reason is that we do not want to trigger
                # a migration simply because a default value changes.
                not_null = self.NOT_NULL(field.default, field_type)
                ftype = ftype.replace('NOT NULL', not_null)
            sql_fields_aux[field_name] = dict(sql=ftype)
            # Postgres - PostGIS:
            # geometry fields are added after the table has been created, not now
            if not (self.dbengine == 'postgres' and \
                    field_type.startswith('geom')):
                fields.append('%s %s' % (field_name, ftype))
        other = ';'

        # backend-specific extensions to fields
        if self.dbengine == 'mysql':
            if not hasattr(table, "_primarykey"):
                fields.append('PRIMARY KEY(%s)' % table._id.name)
            other = ' ENGINE=InnoDB CHARACTER SET utf8;'

        fields = ',\n    '.join(fields)
        # append table-level foreign keys for multicolumn primary keys
        for rtablename in TFK:
            rfields = TFK[rtablename]
            pkeys = db[rtablename]._primarykey
            fkeys = [rfields[k] for k in pkeys]
            fields = fields + ',\n    ' + \
                types['reference TFK'] % dict(
                    table_name=tablename,
                    field_name=', '.join(fkeys),
                    foreign_table=rtablename,
                    foreign_key=', '.join(pkeys),
                    on_delete_action=field.ondelete)

        if getattr(table, '_primarykey', None):
            query = "CREATE TABLE %s(\n    %s,\n    %s) %s" % \
                (tablename, fields,
                 self.PRIMARY_KEY(', '.join(table._primarykey)), other)
        else:
            query = "CREATE TABLE %s(\n    %s\n)%s" % \
                (tablename, fields, other)

        # resolve the folder where the .table migration file lives
        if self.uri.startswith('sqlite:///') \
                or self.uri.startswith('spatialite:///'):
            path_encoding = sys.getfilesystemencoding() \
                or locale.getdefaultlocale()[1] or 'utf8'
            dbpath = self.uri[9:self.uri.rfind('/')] \
                .decode('utf8').encode(path_encoding)
        else:
            dbpath = self.folder

        if not migrate:
            return query
        elif self.uri.startswith('sqlite:memory') \
                or self.uri.startswith('spatialite:memory'):
            table._dbt = None  # in-memory db: no migration file
        elif isinstance(migrate, str):
            table._dbt = pjoin(dbpath, migrate)
        else:
            table._dbt = pjoin(
                dbpath, '%s_%s.table' % (table._db._uri_hash, tablename))

        if not table._dbt or not self.file_exists(table._dbt):
            # first creation of this table
            if table._dbt:
                self.log('timestamp: %s\n%s\n'
                         % (datetime.datetime.today().isoformat(),
                            query), table)
            if not fake_migrate:
                self.create_sequence_and_triggers(query, table)
                table._db.commit()
                # Postgres geom fields are added now,
                # after the table has been created
                for query in postcreation_fields:
                    self.execute(query)
                    table._db.commit()
            if table._dbt:
                tfile = self.file_open(table._dbt, 'w')
                pickle.dump(sql_fields, tfile)
                self.file_close(tfile)
                if fake_migrate:
                    self.log('faked!\n', table)
                else:
                    self.log('success!\n', table)
        else:
            # table metadata exists: compare and migrate if it changed
            tfile = self.file_open(table._dbt, 'r')
            try:
                sql_fields_old = pickle.load(tfile)
            except EOFError:
                self.file_close(tfile)
                raise RuntimeError('File %s appears corrupted' % table._dbt)
            self.file_close(tfile)
            if sql_fields != sql_fields_old:
                self.migrate_table(table,
                                   sql_fields, sql_fields_old,
                                   sql_fields_aux, None,
                                   fake_migrate=fake_migrate)
        return query
def migrate_table(
    self,
    table,
    sql_fields,
    sql_fields_old,
    sql_fields_aux,
    logfile,
    fake_migrate=False,
    ):
    """Bring the physical table in line with the current model.

    Diffs the new metadata (sql_fields) against what was stored on disk
    (sql_fields_old) and issues ALTER TABLE statements for added,
    removed and altered columns, persisting the metadata after each
    step.  *logfile* is deprecated (kept only for signature
    compatibility; logging goes through adapter.log).

    Fixes vs. the original:
    - the DropGeometryColumn statement applied '%' only to the second
      half of a concatenated string literal (operator precedence:
      ``a + b % d`` is ``a + (b % d)``), so the schema placeholder was
      never substituted;
    - ``keys`` is built with list() so the .append() below does not
      rely on dict.keys() returning a list.
    """
    # logfile is deprecated (moved to adapter.log method)
    db = table._db
    db._migrated.append(table._tablename)
    tablename = table._tablename
    def fix(item):
        # normalize legacy metadata entries stored as raw SQL strings
        k, v = item
        if not isinstance(v, dict):
            v = dict(type='unknown', sql=v)
        return k.lower(), v
    # make sure all field names are lower case to avoid
    # migrations because of case change
    sql_fields = dict(map(fix, sql_fields.iteritems()))
    sql_fields_old = dict(map(fix, sql_fields_old.iteritems()))
    sql_fields_aux = dict(map(fix, sql_fields_aux.iteritems()))
    if db._debug:
        logging.debug('migrating %s to %s' % (sql_fields_old, sql_fields))

    keys = list(sql_fields.keys())
    for key in sql_fields_old:
        if not key in keys:
            keys.append(key)
    new_add = self.concat_add(tablename)

    metadata_change = False
    sql_fields_current = copy.copy(sql_fields_old)
    for key in keys:
        query = None
        if not key in sql_fields_old:
            # new column
            sql_fields_current[key] = sql_fields[key]
            if self.dbengine in ('postgres',) and \
               sql_fields[key]['type'].startswith('geometry'):
                # 'sql' == ftype in sql (PostGIS AddGeometryColumn call)
                query = [sql_fields[key]['sql']]
            else:
                query = ['ALTER TABLE %s ADD %s %s;' % \
                         (tablename, key,
                          sql_fields_aux[key]['sql'].replace(', ', new_add))]
            metadata_change = True
        elif self.dbengine in ('sqlite', 'spatialite'):
            # sqlite cannot drop/alter columns; only refresh metadata
            if key in sql_fields:
                sql_fields_current[key] = sql_fields[key]
            metadata_change = True
        elif not key in sql_fields:
            # removed column
            del sql_fields_current[key]
            ftype = sql_fields_old[key]['type']
            if (self.dbengine in ('postgres',) and
                ftype.startswith('geometry')):
                geotype, parms = ftype[:-1].split('(')
                schema = parms.split(',')[0]
                # NOTE: the whole statement must be one operand of '%'
                query = ["SELECT DropGeometryColumn ('%(schema)s', "
                         "'%(table)s', '%(field)s');" %
                         dict(schema=schema, table=tablename, field=key)]
            elif self.dbengine in ('firebird',):
                query = ['ALTER TABLE %s DROP %s;' % (tablename, key)]
            else:
                query = ['ALTER TABLE %s DROP COLUMN %s;' %
                         (tablename, key)]
            metadata_change = True
        elif sql_fields[key]['sql'] != sql_fields_old[key]['sql'] \
              and not (key in table.fields and
                       isinstance(table[key].type, SQLCustomType)) \
              and not sql_fields[key]['type'].startswith('reference')\
              and not sql_fields[key]['type'].startswith('double')\
              and not sql_fields[key]['type'].startswith('id'):
            # column definition changed: copy data through a __tmp column
            sql_fields_current[key] = sql_fields[key]
            t = tablename
            tt = sql_fields_aux[key]['sql'].replace(', ', new_add)
            if self.dbengine in ('firebird',):
                drop_expr = 'ALTER TABLE %s DROP %s;'
            else:
                drop_expr = 'ALTER TABLE %s DROP COLUMN %s;'
            key_tmp = key + '__tmp'
            query = ['ALTER TABLE %s ADD %s %s;' % (t, key_tmp, tt),
                     'UPDATE %s SET %s=%s;' % (t, key_tmp, key),
                     drop_expr % (t, key),
                     'ALTER TABLE %s ADD %s %s;' % (t, key, tt),
                     'UPDATE %s SET %s=%s;' % (t, key, key_tmp),
                     drop_expr % (t, key_tmp)]
            metadata_change = True
        elif sql_fields[key]['type'] != sql_fields_old[key]['type']:
            # metadata-only change (no DDL needed)
            sql_fields_current[key] = sql_fields[key]
            metadata_change = True

        if query:
            self.log('timestamp: %s\n'
                     % datetime.datetime.today().isoformat(), table)
            db['_lastsql'] = '\n'.join(query)
            for sub_query in query:
                self.log(sub_query + '\n', table)
                if fake_migrate:
                    if db._adapter.commit_on_alter_table:
                        self.save_dbt(table, sql_fields_current)
                    self.log('faked!\n', table)
                else:
                    self.execute(sub_query)
                    # Caveat: mysql, oracle and firebird
                    # do not allow multiple alter table
                    # in one transaction so we must commit
                    # partial transactions and
                    # update table._dbt after alter table.
                    if db._adapter.commit_on_alter_table:
                        db.commit()
                        self.save_dbt(table, sql_fields_current)
                        self.log('success!\n', table)

        elif metadata_change:
            self.save_dbt(table, sql_fields_current)

    if metadata_change and not (query and db._adapter.commit_on_alter_table):
        db.commit()
        self.save_dbt(table, sql_fields_current)
        self.log('success!\n', table)
def save_dbt(self, table, sql_fields_current):
    """Persist the table's migration metadata to its .table file.

    Fix: the file handle is now closed even when pickle.dump raises
    (the original leaked the handle on serialization errors).
    """
    tfile = self.file_open(table._dbt, 'w')
    try:
        pickle.dump(sql_fields_current, tfile)
    finally:
        self.file_close(tfile)
1154
def LOWER(self, first):
    """Render a SQL LOWER() call over the expanded expression."""
    inner = self.expand(first)
    return 'LOWER(' + inner + ')'
1157
def UPPER(self, first):
    """Render a SQL UPPER() call over the expanded expression."""
    inner = self.expand(first)
    return 'UPPER(' + inner + ')'
1160
def COUNT(self, first, distinct=None):
    """COUNT(expr), or COUNT(DISTINCT expr) when *distinct* is truthy."""
    inner = self.expand(first)
    if distinct:
        return 'COUNT(DISTINCT %s)' % inner
    return 'COUNT(%s)' % inner
1164
def EXTRACT(self, first, what):
    """ANSI EXTRACT(<what> FROM <expr>) over the expanded expression."""
    inner = self.expand(first)
    return 'EXTRACT({0} FROM {1})'.format(what, inner)
1167
def EPOCH(self, first):
    # Seconds-since-epoch via EXTRACT(epoch FROM ...); dialects override.
    return self.EXTRACT(first, 'epoch')
1170
def LENGTH(self, first):
    """Render a SQL LENGTH() call over the expanded expression."""
    inner = self.expand(first)
    return 'LENGTH(' + inner + ')'
1173
def AGGREGATE(self, first, what):
    """Render an arbitrary aggregate call <what>(<expr>)."""
    inner = self.expand(first)
    return '{0}({1})'.format(what, inner)
1176
def JOIN(self):
    # Keyword used for inner joins; dialect adapters may override.
    return 'JOIN'
1179
def LEFT_JOIN(self):
    # Keyword used for left outer joins; dialect adapters may override.
    return 'LEFT JOIN'
1182
def RANDOM(self):
    # Random-order expression for ORDER BY; dialect adapters override
    # (e.g. RAND() on mysql, RANDOM() on postgres).
    return 'Random()'
1185
def NOT_NULL(self, default, field_type):
    """Column constraint: NOT NULL with the represented default value."""
    rep = self.represent(default, field_type)
    return 'NOT NULL DEFAULT %s' % rep
1188
def COALESCE(self, first, second):
    """COALESCE(first, *second); *second* is a sequence of fallbacks."""
    parts = [self.expand(first)]
    for alt in second:
        parts.append(self.expand(alt))
    return 'COALESCE(%s)' % ','.join(parts)
1192
def COALESCE_ZERO(self, first):
    """COALESCE(expr, 0) — NULL-safe numeric default."""
    inner = self.expand(first)
    return 'COALESCE(' + inner + ',0)'
1195
def RAW(self, first):
    # Pass-through: *first* is emitted into the SQL untouched.
    return first
1198
def ALLOW_NULL(self):
    # Nullable columns need no explicit clause in the base dialect;
    # adapters that require 'NULL' override this.
    return ''
1201
def SUBSTRING(self, field, parameters):
    """SUBSTR(field, start, length); *parameters* is (start, length)."""
    start, length = parameters[0], parameters[1]
    return 'SUBSTR(%s,%s,%s)' % (self.expand(field), start, length)
1204
def PRIMARY_KEY(self, key):
    """PRIMARY KEY clause; *key* is an already-joined column list."""
    return 'PRIMARY KEY({0})'.format(key)
1207
1208 - def _drop(self, table, mode):
1209 return ['DROP TABLE %s;' % table]
1210
def drop(self, table, mode=''):
    # Drop the physical table, commit, and unregister the table from the
    # DAL instance (attribute, tables list, incoming references).
    db = table._db
    queries = self._drop(table, mode)
    for query in queries:
        if table._dbt:
            self.log(query + '\n', table)
        self.execute(query)
    db.commit()
    del db[table._tablename]
    del db.tables[db.tables.index(table._tablename)]
    db._remove_references_to(table)
    if table._dbt:
        # remove the migration metadata file as well
        self.file_delete(table._dbt)
        self.log('success!\n', table)
1225
1226 - def _insert(self, table, fields):
1227 if fields: 1228 keys = ','.join(f.name for f, v in fields) 1229 values = ','.join(self.expand(v, f.type) for f, v in fields) 1230 return 'INSERT INTO %s(%s) VALUES (%s);' % (table, keys, values) 1231 else: 1232 return self._insert_empty(table)
1233
1234 - def _insert_empty(self, table):
1235 return 'INSERT INTO %s DEFAULT VALUES;' % table
1236
def insert(self, table, fields):
    # Execute the INSERT; delegate failures to the table's
    # _on_insert_error hook when present.  Returns a dict of primary-key
    # values for keyed tables, otherwise a Reference to the new row id.
    query = self._insert(table, fields)
    try:
        self.execute(query)
    except Exception:
        e = sys.exc_info()[1]
        if hasattr(table, '_on_insert_error'):
            return table._on_insert_error(table, fields, e)
        raise e
    if hasattr(table, '_primarykey'):
        return dict([(k[0].name, k[1]) for k in fields \
                     if k[0].name in table._primarykey])
    id = self.lastrowid(table)
    if not isinstance(id, int):
        # backend returned something non-numeric (e.g. uuid); pass through
        return id
    rid = Reference(id)
    (rid._table, rid._record) = (table, None)
    return rid
1255
def bulk_insert(self, table, items):
    """Insert each item in turn; returns the list of per-row results."""
    results = []
    for item in items:
        results.append(self.insert(table, item))
    return results
1258
def NOT(self, first):
    """Logical negation of the expanded expression."""
    inner = self.expand(first)
    return '(NOT %s)' % inner
1261
def AND(self, first, second):
    """Logical conjunction of two expanded expressions."""
    lhs = self.expand(first)
    rhs = self.expand(second)
    return '(%s AND %s)' % (lhs, rhs)
1264
def OR(self, first, second):
    """Logical disjunction of two expanded expressions."""
    lhs = self.expand(first)
    rhs = self.expand(second)
    return '(%s OR %s)' % (lhs, rhs)
1267
def BELONGS(self, first, second):
    """IN (...) test; *second* is a nested-select string or a sequence."""
    left = self.expand(first)
    if isinstance(second, str):
        # a nested select arrives with a trailing ';' to strip
        return '(%s IN (%s))' % (left, second[:-1])
    if not second:
        # empty sequence can never match
        return '(1=0)'
    items = ','.join(self.expand(item, first.type) for item in second)
    return '(%s IN (%s))' % (left, items)
1275
def REGEXP(self, first, second):
    "regular expression operator"
    # No portable SQL form; dialect adapters must override.
    raise NotImplementedError
1279
def LIKE(self, first, second):
    "case sensitive like operator"
    # Case-sensitivity of LIKE is dialect-specific; adapters override.
    raise NotImplementedError
1283
def ILIKE(self, first, second):
    """Case-insensitive LIKE (plain LIKE in the base dialect)."""
    lhs = self.expand(first)
    rhs = self.expand(second, 'string')
    return '(%s LIKE %s)' % (lhs, rhs)
1288
def STARTSWITH(self, first, second):
    """Prefix match rendered as LIKE 'second%'."""
    pattern = self.expand(second + '%', 'string')
    return '(%s LIKE %s)' % (self.expand(first), pattern)
1292
def ENDSWITH(self, first, second):
    """Suffix match rendered as LIKE '%second'."""
    pattern = self.expand('%' + second, 'string')
    return '(%s LIKE %s)' % (self.expand(first), pattern)
1296
def CONTAINS(self, first, second, case_sensitive=False):
    # Substring test for string/text/json fields, or membership test for
    # list: fields (items stored bar-delimited as |a|b|).  '%' and '|'
    # in the needle are escaped before being wrapped in wildcards.
    if first.type in ('string', 'text', 'json'):
        if isinstance(second, Expression):
            second = Expression(None, self.CONCAT('%', Expression(
                None, self.REPLACE(second, ('%', '%%'))), '%'))
        else:
            second = '%' + str(second).replace('%', '%%') + '%'
    elif first.type.startswith('list:'):
        if isinstance(second, Expression):
            second = Expression(None, self.CONCAT(
                '%|', Expression(None, self.REPLACE(
                    Expression(None, self.REPLACE(
                        second, ('%', '%%'))), ('|', '||'))), '|%'))
        else:
            second = '%|' + str(second).replace('%', '%%')\
                .replace('|', '||') + '|%'
    # choose the case-sensitive or -insensitive LIKE renderer
    op = case_sensitive and self.LIKE or self.ILIKE
    return op(first, second)
1315
def EQ(self, first, second=None):
    """Equality test; comparison with None renders as IS NULL."""
    left = self.expand(first)
    if second is None:
        return '(%s IS NULL)' % left
    return '(%s = %s)' % (left, self.expand(second, first.type))
1321
def NE(self, first, second=None):
    """Inequality test; comparison with None renders as IS NOT NULL."""
    left = self.expand(first)
    if second is None:
        return '(%s IS NOT NULL)' % left
    return '(%s <> %s)' % (left, self.expand(second, first.type))
1327
def LT(self, first, second=None):
    """Strict less-than; comparing against None is an error."""
    if second is None:
        raise RuntimeError("Cannot compare %s < None" % first)
    lhs = self.expand(first)
    rhs = self.expand(second, first.type)
    return '(%s < %s)' % (lhs, rhs)
1333
def LE(self, first, second=None):
    """Less-than-or-equal; comparing against None is an error."""
    if second is None:
        raise RuntimeError("Cannot compare %s <= None" % first)
    lhs = self.expand(first)
    rhs = self.expand(second, first.type)
    return '(%s <= %s)' % (lhs, rhs)
1339
def GT(self, first, second=None):
    """Strict greater-than; comparing against None is an error."""
    if second is None:
        raise RuntimeError("Cannot compare %s > None" % first)
    lhs = self.expand(first)
    rhs = self.expand(second, first.type)
    return '(%s > %s)' % (lhs, rhs)
1345
def GE(self, first, second=None):
    """Greater-than-or-equal; comparing against None is an error."""
    if second is None:
        raise RuntimeError("Cannot compare %s >= None" % first)
    lhs = self.expand(first)
    rhs = self.expand(second, first.type)
    return '(%s >= %s)' % (lhs, rhs)
1351
def is_numerical_type(self, ftype):
    """True for field types on which SQL arithmetic is meaningful."""
    if ftype.startswith('decimal'):
        return True
    return ftype in ('integer', 'boolean', 'double', 'bigint')
1355
def REPLACE(self, first, pair):
    """Render REPLACE(first, old, new); *pair* is an (old, new) 2-tuple.

    The original signature used Python 2 tuple-parameter unpacking
    (``def REPLACE(self, first, (second, third))``), a syntax error on
    Python 3 (PEP 3113).  Unpacking inside the body keeps the call
    signature identical for existing callers.
    """
    second, third = pair
    return 'REPLACE(%s,%s,%s)' % (self.expand(first, 'string'),
                                  self.expand(second, 'string'),
                                  self.expand(third, 'string'))
1360
def CONCAT(self, *items):
    """ANSI string concatenation of all items with ||."""
    expanded = [self.expand(x, 'string') for x in items]
    return '(%s)' % ' || '.join(expanded)
1363
def ADD(self, first, second):
    """Numeric '+', falling back to string concatenation otherwise."""
    if not self.is_numerical_type(first.type):
        return self.CONCAT(first, second)
    lhs = self.expand(first)
    rhs = self.expand(second, first.type)
    return '(%s + %s)' % (lhs, rhs)
1370
def SUB(self, first, second):
    """SQL subtraction of *second* from *first*."""
    lhs = self.expand(first)
    rhs = self.expand(second, first.type)
    return '(%s - %s)' % (lhs, rhs)
1374
def MUL(self, first, second):
    """SQL multiplication of the two operands."""
    lhs = self.expand(first)
    rhs = self.expand(second, first.type)
    return '(%s * %s)' % (lhs, rhs)
1378
def DIV(self, first, second):
    """SQL division of *first* by *second*."""
    lhs = self.expand(first)
    rhs = self.expand(second, first.type)
    return '(%s / %s)' % (lhs, rhs)
1382
def MOD(self, first, second):
    """SQL modulo of *first* by *second*."""
    lhs = self.expand(first)
    rhs = self.expand(second, first.type)
    # '%%' escapes the percent sign in the format string
    return '(%s %% %s)' % (lhs, rhs)
1386
def AS(self, first, second):
    """Column/expression aliasing: <expr> AS <name>."""
    return '{0} AS {1}'.format(self.expand(first), second)
1389
def ON(self, first, second):
    """JOIN ... ON clause; common filters apply to the join condition."""
    if use_common_filters(second):
        second = self.common_filter(second, [first._tablename])
    joined = self.expand(first)
    condition = self.expand(second)
    return '%s ON %s' % (joined, condition)
1394
def INVERT(self, first):
    """Descending sort direction for ORDER BY."""
    return self.expand(first) + ' DESC'
1397
def COMMA(self, first, second):
    """Comma-join two expressions (e.g. multi-column ORDER BY)."""
    lhs = self.expand(first)
    rhs = self.expand(second)
    return '%s, %s' % (lhs, rhs)
1400
def expand(self, expression, field_type=None):
    # Recursively render a Field / Expression / Query / literal into SQL
    # text.  *field_type* is the type context supplied by the caller and
    # drives literal representation.
    if isinstance(expression, Field):
        out = '%s.%s' % (expression.table._tablename, expression.name)
        if field_type == 'string' and not expression.type in (
            'string','text','json','password'):
            # a string context requires casting non-text columns
            out = 'CAST(%s AS %s)' % (out, self.types['text'])
        return out
    elif isinstance(expression, (Expression, Query)):
        first = expression.first
        second = expression.second
        op = expression.op
        optional_args = expression.optional_args or {}
        # op is one of the operator methods (EQ, AND, ...) — dispatch
        # on arity
        if not second is None:
            out = op(first, second, **optional_args)
        elif not first is None:
            out = op(first,**optional_args)
        elif isinstance(op, str):
            # raw SQL string operator; strip a trailing ';'
            if op.endswith(';'):
                op=op[:-1]
            out = '(%s)' % op
        else:
            out = op()
        return out
    elif field_type:
        # plain literal with a known type context
        return str(self.represent(expression,field_type))
    elif isinstance(expression,(list,tuple)):
        return ','.join(self.represent(item,field_type) \
                            for item in expression)
    elif isinstance(expression, bool):
        return '1' if expression else '0'
    else:
        return str(expression)
1433
def table_alias(self, name):
    # Render a table (or table name) as it appears in FROM; a Table's
    # str() includes any 'real AS alias' form.
    return str(name if isinstance(name,Table) else self.db[name])
1436
def alias(self, table, alias):
    """
    Given a table object, makes a new table object
    with alias name.
    """
    # Shallow-copy the table and its fields, rebinding every copied
    # field to the aliased table, and register the alias on the DAL.
    other = copy.copy(table)
    other['_ot'] = other._ot or other._tablename  # remember real name
    other['ALL'] = SQLALL(other)
    other['_tablename'] = alias
    for fieldname in other.fields:
        other[fieldname] = copy.copy(other[fieldname])
        other[fieldname]._tablename = alias
        other[fieldname].tablename = alias
        other[fieldname].table = other
    table._db[alias] = other
    return other
1453
1454 - def _truncate(self, table, mode=''):
1455 tablename = table._tablename 1456 return ['TRUNCATE TABLE %s %s;' % (tablename, mode or '')]
1457
def truncate(self, table, mode=' '):
    """Truncate *table*, logging each statement and committing.

    Statements come from the table's own adapter so dialect overrides
    of _truncate are honoured.  The original wrapped the body in a
    ``try/finally: pass`` left over from removed logfile handling —
    dead code, removed here (logging moved to adapter.log).
    """
    queries = table._db._adapter._truncate(table, mode)
    for query in queries:
        self.log(query + '\n', table)
        self.execute(query)
    table._db.commit()
    self.log('success!\n', table)
1469
1470 - def _update(self, tablename, query, fields):
1471 if query: 1472 if use_common_filters(query): 1473 query = self.common_filter(query, [tablename]) 1474 sql_w = ' WHERE ' + self.expand(query) 1475 else: 1476 sql_w = '' 1477 sql_v = ','.join(['%s=%s' % (field.name, 1478 self.expand(value, field.type)) \ 1479 for (field, value) in fields]) 1480 tablename = "%s" % self.db[tablename] 1481 return 'UPDATE %s SET %s%s;' % (tablename, sql_v, sql_w)
1482
def update(self, tablename, query, fields):
    # Execute the UPDATE; delegate failures to the table's
    # _on_update_error hook when present.  Returns the affected row
    # count, or None when the driver does not report one.
    sql = self._update(tablename, query, fields)
    try:
        self.execute(sql)
    except Exception:
        e = sys.exc_info()[1]
        table = self.db[tablename]
        if hasattr(table,'_on_update_error'):
            return table._on_update_error(table,query,fields,e)
        raise e
    try:
        return self.cursor.rowcount
    except:
        return None
1497
1498 - def _delete(self, tablename, query):
1499 if query: 1500 if use_common_filters(query): 1501 query = self.common_filter(query, [tablename]) 1502 sql_w = ' WHERE ' + self.expand(query) 1503 else: 1504 sql_w = '' 1505 return 'DELETE FROM %s%s;' % (tablename, sql_w)
1506
def delete(self, tablename, query):
    # Execute the DELETE; sqlite does not enforce ON DELETE CASCADE at
    # the SQL level here, so cascading is emulated in Python.
    sql = self._delete(tablename, query)
    ### special code to handle CASCADE in SQLite & SpatiaLite
    db = self.db
    table = db[tablename]
    if self.dbengine in ('sqlite', 'spatialite') and table._referenced_by:
        # snapshot the ids that are about to disappear
        deleted = [x[table._id.name] for x in db(query).select(table._id)]
    ### end special code to handle CASCADE in SQLite & SpatiaLite
    self.execute(sql)
    try:
        counter = self.cursor.rowcount
    except:
        counter = None
    ### special code to handle CASCADE in SQLite & SpatiaLite
    if self.dbengine in ('sqlite', 'spatialite') and counter:
        for field in table._referenced_by:
            if field.type=='reference '+table._tablename \
                    and field.ondelete=='CASCADE':
                db(field.belongs(deleted)).delete()
    ### end special code to handle CASCADE in SQLite & SpatiaLite
    return counter
1528
def get_table(self, query):
    """Return the single table name referenced by *query*, or raise."""
    tablenames = self.tables(query)
    count = len(tablenames)
    if count == 1:
        return tablenames[0]
    if count < 1:
        raise RuntimeError("No table selected")
    raise RuntimeError("Too many tables selected")
1537
def expand_all(self, fields, tablenames):
    # Normalize a heterogeneous field list (SQLALL, 'table.field'
    # strings, raw literals, Field/Expression objects) into a flat list
    # of selectable objects; empty input selects every field of every
    # requested table.
    db = self.db
    new_fields = []
    append = new_fields.append
    for item in fields:
        if isinstance(item,SQLALL):
            # table.ALL expands to all of the table's fields
            new_fields += item._table
        elif isinstance(item,str):
            if REGEX_TABLE_DOT_FIELD.match(item):
                tablename,fieldname = item.split('.')
                append(db[tablename][fieldname])
            else:
                # arbitrary SQL snippet; wrap so expand() emits it as-is
                append(Expression(db,lambda item=item:item))
        else:
            append(item)
    # ## if no fields specified take them all from the requested tables
    if not new_fields:
        for table in tablenames:
            for field in db[table]:
                append(field)
    return new_fields
1559
def _select(self, query, fields, attributes):
    # Build the full SELECT statement: validates attributes, collects
    # the referenced tables, renders the field list, assembles JOIN /
    # LEFT JOIN clauses, WHERE, GROUP BY / HAVING, ORDER BY and
    # LIMIT/OFFSET.  Returns the SQL string (execution happens in
    # select/_select_aux).
    tables = self.tables
    for key in set(attributes.keys())-SELECT_ARGS:
        raise SyntaxError('invalid select attribute: %s' % key)
    args_get = attributes.get
    tablenames = tables(query)
    tablenames_for_common_filters = tablenames
    for field in fields:
        if isinstance(field, basestring) \
                and REGEX_TABLE_DOT_FIELD.match(field):
            tn,fn = field.split('.')
            field = self.db[tn][fn]
        # selected fields may reference tables absent from the query
        for tablename in tables(field):
            if not tablename in tablenames:
                tablenames.append(tablename)

    if len(tablenames) < 1:
        raise SyntaxError('Set: no tables selected')
    self._colnames = map(self.expand, fields)
    def geoexpand(field):
        # geometry columns are selected as their WKT representation
        if isinstance(field.type,str) and field.type.startswith('geometry'):
            field = field.st_astext()
        return self.expand(field)
    sql_f = ', '.join(map(geoexpand, fields))
    sql_o = ''
    sql_s = ''
    left = args_get('left', False)
    inner_join = args_get('join', False)
    distinct = args_get('distinct', False)
    groupby = args_get('groupby', False)
    orderby = args_get('orderby', False)
    having = args_get('having', False)
    limitby = args_get('limitby', False)
    orderby_on_limitby = args_get('orderby_on_limitby', True)
    for_update = args_get('for_update', False)
    if self.can_select_for_update is False and for_update is True:
        raise SyntaxError('invalid select attribute: for_update')
    if distinct is True:
        sql_s += 'DISTINCT'
    elif distinct:
        # DISTINCT ON (...) — postgres-style expression distinct
        sql_s += 'DISTINCT ON (%s)' % distinct
    if inner_join:
        icommand = self.JOIN()
        if not isinstance(inner_join, (tuple, list)):
            inner_join = [inner_join]
        # plain tables vs. ON-expressions in the join list
        ijoint = [t._tablename for t in inner_join
                  if not isinstance(t,Expression)]
        ijoinon = [t for t in inner_join if isinstance(t, Expression)]
        itables_to_merge={} #issue 490
        [itables_to_merge.update(
                dict.fromkeys(tables(t))) for t in ijoinon]
        ijoinont = [t.first._tablename for t in ijoinon]
        [itables_to_merge.pop(t) for t in ijoinont
         if t in itables_to_merge] #issue 490
        iimportant_tablenames = ijoint + ijoinont + itables_to_merge.keys()
        iexcluded = [t for t in tablenames
                     if not t in iimportant_tablenames]
    if left:
        join = attributes['left']
        command = self.LEFT_JOIN()
        if not isinstance(join, (tuple, list)):
            join = [join]
        joint = [t._tablename for t in join
                 if not isinstance(t, Expression)]
        joinon = [t for t in join if isinstance(t, Expression)]
        #patch join+left patch (solves problem with ordering in left joins)
        tables_to_merge={}
        [tables_to_merge.update(
                dict.fromkeys(tables(t))) for t in joinon]
        joinont = [t.first._tablename for t in joinon]
        [tables_to_merge.pop(t) for t in joinont if t in tables_to_merge]
        # common filters must not apply to left-joined tables
        tablenames_for_common_filters = [t for t in tablenames
                                         if not t in joinont ]
        important_tablenames = joint + joinont + tables_to_merge.keys()
        excluded = [t for t in tablenames
                    if not t in important_tablenames ]
    else:
        excluded = tablenames

    if use_common_filters(query):
        query = self.common_filter(query,tablenames_for_common_filters)
    sql_w = ' WHERE ' + self.expand(query) if query else ''

    # assemble the FROM clause for the four join combinations
    if inner_join and not left:
        sql_t = ', '.join([self.table_alias(t) for t in iexcluded + \
                               itables_to_merge.keys()])
        for t in ijoinon:
            sql_t += ' %s %s' % (icommand, t)
    elif not inner_join and left:
        sql_t = ', '.join([self.table_alias(t) for t in excluded + \
                               tables_to_merge.keys()])
        if joint:
            sql_t += ' %s %s' % (command,
                                 ','.join([self.table_alias(t) for t in joint]))
        for t in joinon:
            sql_t += ' %s %s' % (command, t)
    elif inner_join and left:
        all_tables_in_query = set(important_tablenames + \
                                  iimportant_tablenames + \
                                  tablenames)
        tables_in_joinon = set(joinont + ijoinont)
        tables_not_in_joinon = \
            all_tables_in_query.difference(tables_in_joinon)
        sql_t = ','.join([self.table_alias(t) for t in tables_not_in_joinon])
        for t in ijoinon:
            sql_t += ' %s %s' % (icommand, t)
        if joint:
            sql_t += ' %s %s' % (command,
                                 ','.join([self.table_alias(t) for t in joint]))
        for t in joinon:
            sql_t += ' %s %s' % (command, t)
    else:
        sql_t = ', '.join(self.table_alias(t) for t in tablenames)
    if groupby:
        if isinstance(groupby, (list, tuple)):
            groupby = xorify(groupby)
        sql_o += ' GROUP BY %s' % self.expand(groupby)
        if having:
            sql_o += ' HAVING %s' % attributes['having']
    if orderby:
        if isinstance(orderby, (list, tuple)):
            orderby = xorify(orderby)
        if str(orderby) == '<random>':
            sql_o += ' ORDER BY %s' % self.RANDOM()
        else:
            sql_o += ' ORDER BY %s' % self.expand(orderby)
    if (limitby and not groupby and tablenames and orderby_on_limitby and not orderby):
        # LIMIT without ORDER BY is non-deterministic; default to pkeys
        sql_o += ' ORDER BY %s' % ', '.join(['%s.%s'%(t,x) for t in tablenames for x in (hasattr(self.db[t],'_primarykey') and self.db[t]._primarykey or [self.db[t]._id.name])])
    # oracle does not support limitby
    sql = self.select_limitby(sql_s, sql_f, sql_t, sql_w, sql_o, limitby)
    if for_update and self.can_select_for_update is True:
        sql = sql.rstrip(';') + ' FOR UPDATE;'
    return sql
def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
    """Assemble the final SELECT, adding LIMIT/OFFSET when requested."""
    if limitby:
        lmin, lmax = limitby
        sql_o = sql_o + ' LIMIT %i OFFSET %i' % (lmax - lmin, lmin)
    return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)
1700
def _fetchall(self):
    # Fetch all rows from the last executed cursor; adapters override
    # when the driver needs different handling.
    return self.cursor.fetchall()
1703
def _select_aux(self,sql,fields,attributes):
    # Execute (or fetch from cache) the prepared SELECT and post-process
    # the raw rows into a Rows object via the configured processor.
    args_get = attributes.get
    cache = args_get('cache',None)
    if not cache:
        self.execute(sql)
        rows = self._fetchall()
    else:
        (cache_model, time_expire) = cache
        # cache key derived from connection uri + SQL; hashed when long
        key = self.uri + '/' + sql + '/rows'
        if len(key)>200: key = hashlib_md5(key).hexdigest()
        def _select_aux2():
            self.execute(sql)
            return self._fetchall()
        rows = cache_model(key,_select_aux2,time_expire)
    if isinstance(rows,tuple):
        rows = list(rows)
    limitby = args_get('limitby', None) or (0,)
    # some backends ignore LIMIT; slice defensively from the offset
    rows = self.rowslice(rows,limitby[0],None)
    processor = args_get('processor',self.parse)
    cacheable = args_get('cacheable',False)
    return processor(rows,fields,self._colnames,cacheable=cacheable)
def select(self, query, fields, attributes):
    """
    Always returns a Rows object, possibly empty.
    """
    sql = self._select(query, fields, attributes)
    cache = attributes.get('cache', None)
    if cache and attributes.get('cacheable',False):
        # cacheable result sets: cache the processed Rows object itself
        del attributes['cache']
        (cache_model, time_expire) = cache
        key = self.uri + '/' + sql
        if len(key)>200: key = hashlib_md5(key).hexdigest()
        args = (sql,fields,attributes)
        return cache_model(
            key,
            lambda self=self,args=args:self._select_aux(*args),
            time_expire)
    else:
        # otherwise _select_aux handles any raw-row caching
        return self._select_aux(sql,fields,attributes)
1744
1745 - def _count(self, query, distinct=None):
1746 tablenames = self.tables(query) 1747 if query: 1748 if use_common_filters(query): 1749 query = self.common_filter(query, tablenames) 1750 sql_w = ' WHERE ' + self.expand(query) 1751 else: 1752 sql_w = '' 1753 sql_t = ','.join(self.table_alias(t) for t in tablenames) 1754 if distinct: 1755 if isinstance(distinct,(list, tuple)): 1756 distinct = xorify(distinct) 1757 sql_d = self.expand(distinct) 1758 return 'SELECT count(DISTINCT %s) FROM %s%s;' % \ 1759 (sql_d, sql_t, sql_w) 1760 return 'SELECT count(*) FROM %s%s;' % (sql_t, sql_w)
1761
def count(self, query, distinct=None):
    # Run the COUNT statement and return the scalar result.
    self.execute(self._count(query, distinct))
    return self.cursor.fetchone()[0]
1765
def tables(self, *queries):
    """Collect the table names referenced by the given query trees."""
    found = set()
    for query in queries:
        if isinstance(query, Field):
            found.add(query.tablename)
        elif isinstance(query, (Expression, Query)):
            # recurse into both operands of the expression tree
            if query.first is not None:
                found = found.union(self.tables(query.first))
            if query.second is not None:
                found = found.union(self.tables(query.second))
    return list(found)
1777
def commit(self):
    """Commit the current transaction if a connection is open."""
    if not self.connection:
        return None
    return self.connection.commit()
1781
def rollback(self):
    """Roll back the current transaction if a connection is open."""
    if not self.connection:
        return None
    return self.connection.rollback()
1785
def close_connection(self):
    """Close and forget the DB connection; returns close()'s result."""
    if not self.connection:
        return None
    result = self.connection.close()
    self.connection = None
    return result
1791
def distributed_transaction_begin(self, key):
    # No-op in the base adapter; adapters supporting two-phase commit
    # override this.
    return
1794
def prepare(self, key):
    """First phase of two-phase commit on the open connection."""
    if self.connection:
        self.connection.prepare()
1797
def commit_prepared(self, key):
    """Second phase of two-phase commit: commit the prepared work."""
    if self.connection:
        self.connection.commit()
1800
def rollback_prepared(self, key):
    """Abort a prepared two-phase transaction."""
    if self.connection:
        self.connection.rollback()
1803
def concat_add(self, tablename):
    # Separator used when chaining ADD clauses in ALTER TABLE;
    # dialects that need the table name repeated override this.
    return ', ADD '
1806
def constraint_name(self, table, fieldname):
    """Deterministic name for a foreign-key constraint."""
    return '{0}_{1}__constraint'.format(table, fieldname)
1809
def create_sequence_and_triggers(self, query, table, **args):
    # Base adapter just runs the CREATE TABLE; adapters needing
    # sequences/triggers for autoincrement (e.g. oracle) override.
    self.execute(query)
1812
def log_execute(self, *a, **b):
    # Execute a[0] on the cursor, recording the SQL and its timing on
    # the db instance; returns the driver's execute() result.
    if not self.connection: return None
    command = a[0]
    if hasattr(self,'filter_sql_command'):
        # adapter-specific rewriting of the command
        command = self.filter_sql_command(command)
    if self.db._debug:
        LOGGER.debug('SQL: %s' % command)
    self.db._lastsql = command
    t0 = time.time()
    ret = self.cursor.execute(command, *a[1:], **b)
    self.db._timings.append((command,time.time()-t0))
    # keep only the most recent TIMINGSSIZE timings
    del self.db._timings[:-TIMINGSSIZE]
    return ret
1826
def execute(self, *a, **b):
    # Thin alias: all SQL execution funnels through log_execute.
    return self.log_execute(*a, **b)
1829
def represent(self, obj, fieldtype):
    # Convert a Python value into its SQL-literal representation for
    # the given field type (quoting/adapting via self.adapt).
    field_is_type = fieldtype.startswith
    if isinstance(obj, CALLABLETYPES):
        # callables supply their value lazily
        obj = obj()
    if isinstance(fieldtype, SQLCustomType):
        value = fieldtype.encoder(obj)
        if fieldtype.type in ('string','text', 'json'):
            return self.adapt(value)
        return value
    if isinstance(obj, (Expression, Field)):
        return str(obj)
    if field_is_type('list:'):
        if not obj:
            obj = []
        elif not isinstance(obj, (list, tuple)):
            obj = [obj]
        if field_is_type('list:string'):
            obj = map(str,obj)
        else:
            obj = map(int,[o for o in obj if o != ''])
    # we don't want to bar_encode json objects
    if isinstance(obj, (list, tuple)) and (not fieldtype == "json"):
        # list: values are stored bar-delimited: |a|b|c|
        obj = bar_encode(obj)
    if obj is None:
        return 'NULL'
    if obj == '' and not fieldtype[:2] in ['st', 'te', 'js', 'pa', 'up']:
        # empty string means NULL except for textual/upload types
        return 'NULL'
    r = self.represent_exceptions(obj, fieldtype)
    if not r is None:
        return r
    if fieldtype == 'boolean':
        # anything not starting with '0' or 'F' counts as true
        if obj and not str(obj)[:1].upper() in '0F':
            return self.smart_adapt(self.TRUE)
        else:
            return self.smart_adapt(self.FALSE)
    if fieldtype == 'id' or fieldtype == 'integer':
        return str(long(obj))
    if field_is_type('decimal'):
        return str(obj)
    elif field_is_type('reference'): # reference
        if fieldtype.find('.')>0:
            # reference to a keyed table: represent verbatim
            return repr(obj)
        elif isinstance(obj, (Row, Reference)):
            return str(obj['id'])
        return str(long(obj))
    elif fieldtype == 'double':
        return repr(float(obj))
    if isinstance(obj, unicode):
        obj = obj.encode(self.db_codec)
    if fieldtype == 'blob':
        # blobs are stored base64-encoded
        obj = base64.b64encode(str(obj))
    elif fieldtype == 'date':
        if isinstance(obj, (datetime.date, datetime.datetime)):
            obj = obj.isoformat()[:10]
        else:
            obj = str(obj)
    elif fieldtype == 'datetime':
        if isinstance(obj, datetime.datetime):
            obj = obj.isoformat(self.T_SEP)[:19]
        elif isinstance(obj, datetime.date):
            # dates promote to midnight datetimes
            obj = obj.isoformat()[:10]+self.T_SEP+'00:00:00'
        else:
            obj = str(obj)
    elif fieldtype == 'time':
        if isinstance(obj, datetime.time):
            obj = obj.isoformat()[:10]
        else:
            obj = str(obj)
    elif fieldtype == 'json':
        if not self.native_json:
            if have_serializers:
                obj = serializers.json(obj)
            elif simplejson:
                obj = simplejson.dumps(obj)
            else:
                raise RuntimeError("missing simplejson")
    if not isinstance(obj,bytes):
        obj = bytes(obj)
    try:
        # ensure the payload is valid in the target codec
        obj.decode(self.db_codec)
    except:
        obj = obj.decode('latin1').encode(self.db_codec)
    return self.adapt(obj)
1913
def represent_exceptions(self, obj, fieldtype):
    # Hook for adapters with special literal forms; None means
    # "no special case, use the default representation".
    return None
1916
def lastrowid(self, table):
    # Base adapter cannot report the last inserted id; adapters
    # override using their driver's mechanism.
    return None
1919
def rowslice(self, rows, minimum=0, maximum=None):
    """
    By default this function does nothing;
    overload when db does not do slicing.
    """
    return rows
1926
def parse_value(self, value, field_type, blob_decode=True):
    # Convert a raw driver value into its Python form for *field_type*,
    # dispatching through self.parsemap keyed on the base type name.
    if field_type != 'blob' and isinstance(value, str):
        try:
            value = value.decode(self.db._db_codec)
        except Exception:
            pass
    if isinstance(value, unicode):
        value = value.encode('utf-8')
    if isinstance(field_type, SQLCustomType):
        value = field_type.decoder(value)
    if not isinstance(field_type, str) or value is None:
        return value
    elif field_type in ('string', 'text', 'password', 'upload', 'dict'):
        return value
    elif field_type.startswith('geo'):
        # geometry values pass through as WKT text
        return value
    elif field_type == 'blob' and not blob_decode:
        return value
    else:
        key = REGEX_TYPE.match(field_type).group(0)
        return self.parsemap[key](value,field_type)
1948
    def parse_reference(self, value, field_type):
        # field_type is 'reference <table>[.<field>]'; strip the
        # 'reference ' prefix (10 chars) to get the referee.
        referee = field_type[10:].strip()
        if not '.' in referee:
            # plain table reference: wrap the id in a lazy Reference record
            value = Reference(value)
            value._table, value._record = self.db[referee], None
        return value
1955
1956 - def parse_boolean(self, value, field_type):
1957 return value == self.TRUE or str(value)[:1].lower() == 't'
1958
1959 - def parse_date(self, value, field_type):
1960 if isinstance(value, datetime.datetime): 1961 return value.date() 1962 if not isinstance(value, (datetime.date,datetime.datetime)): 1963 (y, m, d) = map(int, str(value)[:10].strip().split('-')) 1964 value = datetime.date(y, m, d) 1965 return value
1966
1967 - def parse_time(self, value, field_type):
1968 if not isinstance(value, datetime.time): 1969 time_items = map(int,str(value)[:8].strip().split(':')[:3]) 1970 if len(time_items) == 3: 1971 (h, mi, s) = time_items 1972 else: 1973 (h, mi, s) = time_items + [0] 1974 value = datetime.time(h, mi, s) 1975 return value
1976
1977 - def parse_datetime(self, value, field_type):
1978 if not isinstance(value, datetime.datetime): 1979 value = str(value) 1980 date_part,time_part,timezone = value[:10],value[11:19],value[19:] 1981 if '+' in timezone: 1982 ms,tz = timezone.split('+') 1983 h,m = tz.split(':') 1984 dt = datetime.timedelta(seconds=3600*int(h)+60*int(m)) 1985 elif '-' in timezone: 1986 ms,tz = timezone.split('-') 1987 h,m = tz.split(':') 1988 dt = -datetime.timedelta(seconds=3600*int(h)+60*int(m)) 1989 else: 1990 dt = None 1991 (y, m, d) = map(int,date_part.split('-')) 1992 time_parts = time_part and time_part.split(':')[:3] or (0,0,0) 1993 while len(time_parts)<3: time_parts.append(0) 1994 time_items = map(int,time_parts) 1995 (h, mi, s) = time_items 1996 value = datetime.datetime(y, m, d, h, mi, s) 1997 if dt: 1998 value = value + dt 1999 return value
2000
2001 - def parse_blob(self, value, field_type):
2002 return base64.b64decode(str(value))
2003
2004 - def parse_decimal(self, value, field_type):
2005 decimals = int(field_type[8:-1].split(',')[-1]) 2006 if self.dbengine in ('sqlite', 'spatialite'): 2007 value = ('%.' + str(decimals) + 'f') % value 2008 if not isinstance(value, decimal.Decimal): 2009 value = decimal.Decimal(str(value)) 2010 return value
2011
2012 - def parse_list_integers(self, value, field_type):
2013 if not isinstance(self, NoSQLAdapter): 2014 value = bar_decode_integer(value) 2015 return value
2016
2017 - def parse_list_references(self, value, field_type):
2018 if not isinstance(self, NoSQLAdapter): 2019 value = bar_decode_integer(value) 2020 return [self.parse_reference(r, field_type[5:]) for r in value]
2021
2022 - def parse_list_strings(self, value, field_type):
2023 if not isinstance(self, NoSQLAdapter): 2024 value = bar_decode_string(value) 2025 return value
2026
    def parse_id(self, value, field_type):
        # ids are normalized to (Python 2) long integers
        return long(value)
2029
    def parse_integer(self, value, field_type):
        # integer/bigint columns are normalized to (Python 2) long
        return long(value)
2032
    def parse_double(self, value, field_type):
        # float/double columns map straight to Python float
        return float(value)
2035
    def parse_json(self, value, field_type):
        # Backends without a native JSON type store a serialized string:
        # decode it here. Native-JSON backends hand back an already
        # parsed object, which passes through untouched.
        if not self.native_json:
            if not isinstance(value, basestring):
                raise RuntimeError('json data not a string')
            if isinstance(value, unicode):
                value = value.encode('utf-8')
            if have_serializers:
                value = serializers.loads_json(value)
            elif simplejson:
                value = simplejson.loads(value)
            else:
                raise RuntimeError("missing simplejson")
        return value
2049
    def build_parsemap(self):
        # Map base field-type names to their parse_* methods;
        # parse_value() dispatches through this table. 'list:' types
        # cover the bar-encoded text columns used by SQL backends.
        self.parsemap = {
            'id':self.parse_id,
            'integer':self.parse_integer,
            'bigint':self.parse_integer,
            'float':self.parse_double,
            'double':self.parse_double,
            'reference':self.parse_reference,
            'boolean':self.parse_boolean,
            'date':self.parse_date,
            'time':self.parse_time,
            'datetime':self.parse_datetime,
            'blob':self.parse_blob,
            'decimal':self.parse_decimal,
            'json':self.parse_json,
            'list:integer':self.parse_list_integers,
            'list:reference':self.parse_list_references,
            'list:string':self.parse_list_strings,
            }
2069
    def parse(self, rows, fields, colnames, blob_decode=True,
              cacheable = False):
        # Convert raw driver rows into a Rows object of nested Row
        # records (one sub-Row per table), decoding each column through
        # parse_value and attaching record helpers and virtual fields.
        db = self.db
        virtualtables = []
        new_rows = []
        # pre-compute per-column metadata once, outside the row loop
        tmps = []
        for colname in colnames:
            if not REGEX_TABLE_DOT_FIELD.match(colname):
                tmps.append(None)
            else:
                (tablename, _the_sep_, fieldname) = colname.partition('.')
                table = db[tablename]
                field = table[fieldname]
                ft = field.type
                tmps.append((tablename, fieldname, table, field, ft))
        for (i, row) in enumerate(rows):
            new_row = Row()
            for (j, colname) in enumerate(colnames):
                value = row[j]
                tmp = tmps[j]
                if tmp:
                    (tablename, fieldname, table, field, ft) = tmp
                    if tablename in new_row:
                        colset = new_row[tablename]
                    else:
                        colset = new_row[tablename] = Row()
                        if tablename not in virtualtables:
                            virtualtables.append(tablename)
                    value = self.parse_value(value, ft, blob_decode)
                    if field.filter_out:
                        value = field.filter_out(value)
                    colset[fieldname] = value

                    # for backward compatibility
                    if ft == 'id' and fieldname != 'id' and \
                            not 'id' in table.fields:
                        colset['id'] = value

                    if ft == 'id' and not cacheable:
                        # temporary hack to deal with
                        # GoogleDatastoreAdapter
                        # references
                        if isinstance(self, GoogleDatastoreAdapter):
                            id = value.key().id_or_name()
                            colset[fieldname] = id
                            colset.gae_item = value
                        else:
                            id = value
                        colset.update_record = RecordUpdater(colset, table, id)
                        colset.delete_record = RecordDeleter(table, id)
                        if table._db._lazy_tables:
                            colset['__get_lazy_reference__'] = LazyReferenceGetter(table, id)
                        # expose back-references as lazy Sets
                        for rfield in table._referenced_by:
                            referee_link = db._referee_name and \
                                db._referee_name % dict(
                                table=rfield.tablename, field=rfield.name)
                            if referee_link and not referee_link in colset:
                                colset[referee_link] = LazySet(rfield, id)
                else:
                    # expression/aggregate columns land in row._extra
                    if not '_extra' in new_row:
                        new_row['_extra'] = Row()
                    new_row['_extra'][colname] = \
                        self.parse_value(value,
                                         fields[j].type, blob_decode)
                    # honor 'expression AS alias' by exposing the alias
                    new_column_name = \
                        REGEX_SELECT_AS_PARSER.search(colname)
                    if not new_column_name is None:
                        column_name = new_column_name.groups(0)
                        setattr(new_row, column_name[0], value)
            new_rows.append(new_row)
        rowsobj = Rows(db, new_rows, colnames, rawrows=rows)

        for tablename in virtualtables:
            table = db[tablename]
            fields_virtual = [(f, v) for (f, v) in table.iteritems()
                              if isinstance(v, FieldVirtual)]
            fields_lazy = [(f, v) for (f, v) in table.iteritems()
                           if isinstance(v, FieldMethod)]
            if fields_virtual or fields_lazy:
                for row in rowsobj.records:
                    box = row[tablename]
                    for f, v in fields_virtual:
                        box[f] = v.f(row)
                    for f, v in fields_lazy:
                        box[f] = (v.handler or VirtualCommand)(v.f, row)

            ### old style virtual fields
            for item in table.virtualfields:
                try:
                    rowsobj = rowsobj.setvirtualfields(**{tablename: item})
                except (KeyError, AttributeError):
                    # to avoid breaking virtualfields when partial select
                    pass
        return rowsobj
2165
2166 - def common_filter(self, query, tablenames):
2167 tenant_fieldname = self.db._request_tenant 2168 2169 for tablename in tablenames: 2170 table = self.db[tablename] 2171 2172 # deal with user provided filters 2173 if table._common_filter != None: 2174 query = query & table._common_filter(query) 2175 2176 # deal with multi_tenant filters 2177 if tenant_fieldname in table: 2178 default = table[tenant_fieldname].default 2179 if not default is None: 2180 newquery = table[tenant_fieldname] == default 2181 if query is None: 2182 query = newquery 2183 else: 2184 query = query & newquery 2185 return query
2186
2187 - def CASE(self,query,t,f):
2188 def represent(x): 2189 types = {type(True):'boolean',type(0):'integer',type(1.0):'double'} 2190 if x is None: return 'NULL' 2191 elif isinstance(x,Expression): return str(x) 2192 else: return self.represent(x,types.get(type(x),'string'))
2193 return Expression(self.db,'CASE WHEN %s THEN %s ELSE %s END' % \ 2194 (self.expand(query),represent(t),represent(f))) 2195
###################################################################################
# List of all the available adapters; they all extend BaseAdapter.
###################################################################################

class SQLiteAdapter(BaseAdapter):
    """Adapter for SQLite databases (sqlite2/sqlite3 drivers)."""

    drivers = ('sqlite2', 'sqlite3')

    # no native SELECT ... FOR UPDATE; emulated in select() below with
    # BEGIN IMMEDIATE TRANSACTION
    can_select_for_update = None

    def EXTRACT(self, field, what):
        # delegates to the web2py_extract() SQL function registered on
        # the connection in after_connection()
        return "web2py_extract('%s',%s)" % (what, self.expand(field))

    @staticmethod
    def web2py_extract(lookup, s):
        """SQL-level helper: extract one component (or the epoch) from
        an ISO 'YYYY-MM-DD HH:MM:SS' string; None on any parse error."""
        spans = {
            'year': (0, 4),
            'month': (5, 7),
            'day': (8, 10),
            'hour': (11, 13),
            'minute': (14, 16),
            'second': (17, 19),
            }
        try:
            if lookup == 'epoch':
                stamp = datetime.datetime.strptime(s, '%Y-%m-%d %H:%M:%S')
                return time.mktime(stamp.timetuple())
            start, stop = spans[lookup]
            return int(s[start:stop])
        except:
            # mirror SQL semantics: invalid input yields NULL
            return None

    @staticmethod
    def web2py_regexp(expression, item):
        """SQL-level REGEXP operator backed by Python's re module."""
        return re.compile(expression).search(item) is not None

    def __init__(self, db, uri, pool_size=0, folder=None, db_codec='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        self.db = db
        self.dbengine = "sqlite"
        self.uri = uri
        if do_connect:
            self.find_driver(adapter_args)
        self.pool_size = 0  # sqlite connections are never pooled
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        fs_encoding = (sys.getfilesystemencoding()
                       or locale.getdefaultlocale()[1] or 'utf8')
        if uri.startswith('sqlite:memory'):
            self.dbpath = ':memory:'
        else:
            self.dbpath = uri.split('://', 1)[1]
            if self.dbpath[0] != '/':
                # relative path: anchor the db file in the working folder
                if PYTHON_VERSION == 2:
                    base = self.folder.decode(fs_encoding).encode('utf8')
                else:
                    base = self.folder
                self.dbpath = pjoin(base, self.dbpath)
        if not 'check_same_thread' in driver_args:
            driver_args['check_same_thread'] = False
        if do_connect and not 'detect_types' in driver_args:
            driver_args['detect_types'] = self.driver.PARSE_DECLTYPES
        def connector(dbpath=self.dbpath, driver_args=driver_args):
            return self.driver.Connection(dbpath, **driver_args)
        self.connector = connector
        if do_connect:
            self.reconnect()

    def after_connection(self):
        # expose the static helpers above as SQL functions
        self.connection.create_function('web2py_extract', 2,
                                        SQLiteAdapter.web2py_extract)
        self.connection.create_function("REGEXP", 2,
                                        SQLiteAdapter.web2py_regexp)

    def _truncate(self, table, mode=''):
        # sqlite has no TRUNCATE: delete all rows, then reset the
        # autoincrement counter kept in sqlite_sequence
        name = table._tablename
        return ['DELETE FROM %s;' % name,
                "DELETE FROM sqlite_sequence WHERE name='%s';" % name]

    def lastrowid(self, table):
        return self.cursor.lastrowid

    def REGEXP(self, first, second):
        return '(%s REGEXP %s)' % (self.expand(first),
                                   self.expand(second, 'string'))

    def select(self, query, fields, attributes):
        """
        Simulate SELECT ... FOR UPDATE with BEGIN IMMEDIATE TRANSACTION.
        Note that the entire database, rather than one record, is locked
        (it will be locked eventually anyway by the following UPDATE).
        """
        wants_lock = attributes.get('for_update', False)
        if wants_lock and not 'cache' in attributes:
            self.execute('BEGIN IMMEDIATE TRANSACTION;')
        return super(SQLiteAdapter, self).select(query, fields, attributes)
2291
class SpatiaLiteAdapter(SQLiteAdapter):
    # SQLite with the SpatiaLite extension loaded: adds a GEOMETRY
    # column type and the OGC spatial functions below.
    drivers = ('sqlite3','sqlite2')

    types = copy.copy(BaseAdapter.types)
    types.update(geometry='GEOMETRY')

    def __init__(self, db, uri, pool_size=0, folder=None, db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, srid=4326, after_connection=None):
        self.db = db
        self.dbengine = "spatialite"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args)
        self.pool_size = 0
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        self.srid = srid  # spatial reference id used for geometry columns
        path_encoding = sys.getfilesystemencoding() \
            or locale.getdefaultlocale()[1] or 'utf8'
        if uri.startswith('spatialite:memory'):
            self.dbpath = ':memory:'
        else:
            self.dbpath = uri.split('://',1)[1]
            if self.dbpath[0] != '/':
                self.dbpath = pjoin(
                    self.folder.decode(path_encoding).encode('utf8'), self.dbpath)
        if not 'check_same_thread' in driver_args:
            driver_args['check_same_thread'] = False
        if not 'detect_types' in driver_args and do_connect:
            driver_args['detect_types'] = self.driver.PARSE_DECLTYPES
        def connector(dbpath=self.dbpath, driver_args=driver_args):
            return self.driver.Connection(dbpath, **driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def after_connection(self):
        self.connection.enable_load_extension(True)
        # for Windows, rename libspatialite-2.dll to libspatialite.dll
        # Linux uses libspatialite.so
        # Mac OS X uses libspatialite.dylib
        libspatialite = SPATIALLIBS[platform.system()]
        self.execute(r'SELECT load_extension("%s");' % libspatialite)

        self.connection.create_function('web2py_extract', 2,
                                        SQLiteAdapter.web2py_extract)
        self.connection.create_function("REGEXP", 2,
                                        SQLiteAdapter.web2py_regexp)

    # GIS functions

    def ST_ASGEOJSON(self, first, second):
        # second carries the 'precision' and 'options' arguments
        return 'AsGeoJSON(%s,%s,%s)' %(self.expand(first),
            second['precision'], second['options'])

    def ST_ASTEXT(self, first):
        return 'AsText(%s)' %(self.expand(first))

    def ST_CONTAINS(self, first, second):
        return 'Contains(%s,%s)' %(self.expand(first),
                                   self.expand(second, first.type))

    def ST_DISTANCE(self, first, second):
        return 'Distance(%s,%s)' %(self.expand(first),
                                   self.expand(second, first.type))

    def ST_EQUALS(self, first, second):
        return 'Equals(%s,%s)' %(self.expand(first),
                                 self.expand(second, first.type))

    def ST_INTERSECTS(self, first, second):
        return 'Intersects(%s,%s)' %(self.expand(first),
                                     self.expand(second, first.type))

    def ST_OVERLAPS(self, first, second):
        return 'Overlaps(%s,%s)' %(self.expand(first),
                                   self.expand(second, first.type))

    def ST_SIMPLIFY(self, first, second):
        return 'Simplify(%s,%s)' %(self.expand(first),
                                   self.expand(second, 'double'))

    def ST_TOUCHES(self, first, second):
        return 'Touches(%s,%s)' %(self.expand(first),
                                  self.expand(second, first.type))

    def ST_WITHIN(self, first, second):
        return 'Within(%s,%s)' %(self.expand(first),
                                 self.expand(second, first.type))

    def represent(self, obj, fieldtype):
        # geometry literals become ST_GeomFromText() with the srid
        # parsed out of the declared type, e.g. 'geometry(schema,4326)'
        field_is_type = fieldtype.startswith
        if field_is_type('geo'):
            srid = 4326 # Spatialite default srid for geometry
            geotype, parms = fieldtype[:-1].split('(')
            parms = parms.split(',')
            if len(parms) >= 2:
                schema, srid = parms[:2]
#             if field_is_type('geometry'):
            value = "ST_GeomFromText('%s',%s)" %(obj, srid)
#             elif field_is_type('geography'):
#                 value = "ST_GeogFromText('SRID=%s;%s')" %(srid, obj)
#             else:
#                 raise SyntaxError, 'Invalid field type %s' %fieldtype
            return value
        return BaseAdapter.represent(self, obj, fieldtype)
2399
class JDBCSQLiteAdapter(SQLiteAdapter):
    """SQLite over zxJDBC, for Jython deployments."""

    drivers = ('zxJDBC_sqlite',)

    def __init__(self, db, uri, pool_size=0, folder=None, db_codec='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        self.db = db
        self.dbengine = "sqlite"
        self.uri = uri
        if do_connect:
            self.find_driver(adapter_args)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        fs_encoding = (sys.getfilesystemencoding()
                       or locale.getdefaultlocale()[1] or 'utf8')
        if uri.startswith('sqlite:memory'):
            self.dbpath = ':memory:'
        else:
            self.dbpath = uri.split('://', 1)[1]
            if self.dbpath[0] != '/':
                self.dbpath = pjoin(
                    self.folder.decode(fs_encoding).encode('utf8'), self.dbpath)
        def connector(dbpath=self.dbpath, driver_args=driver_args):
            # connect through the JDBC bridge rather than DB-API directly
            return self.driver.connect(
                self.driver.getConnection('jdbc:sqlite:' + dbpath),
                **driver_args)
        self.connector = connector
        if do_connect:
            self.reconnect()

    def after_connection(self):
        # FIXME http://www.zentus.com/sqlitejdbc/custom_functions.html for UDFs
        self.connection.create_function('web2py_extract', 2,
                                        SQLiteAdapter.web2py_extract)

    def execute(self, a):
        return self.log_execute(a)
2439
class MySQLAdapter(BaseAdapter):
    """Adapter for MySQL via the MySQLdb or pymysql drivers."""

    drivers = ('MySQLdb','pymysql')

    commit_on_alter_table = True
    support_distributed_transaction = True
    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR(%(length)s)',
        'text': 'LONGTEXT',
        'json': 'LONGTEXT',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'LONGBLOB',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INT',
        'bigint': 'BIGINT',
        'float': 'FLOAT',
        'double': 'DOUBLE',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'TIME',
        'datetime': 'DATETIME',
        'id': 'INT AUTO_INCREMENT NOT NULL',
        'reference': 'INT, INDEX %(index_name)s (%(field_name)s), FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'LONGTEXT',
        'list:string': 'LONGTEXT',
        'list:reference': 'LONGTEXT',
        'big-id': 'BIGINT AUTO_INCREMENT NOT NULL',
        'big-reference': 'BIGINT, INDEX %(index_name)s (%(field_name)s), FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        }

    QUOTE_TEMPLATE = "`%s`"

    def varquote(self,name):
        return varquote_aux(name,'`%s`')

    def RANDOM(self):
        return 'RAND()'

    def SUBSTRING(self,field,parameters):
        return 'SUBSTRING(%s,%s,%s)' % (self.expand(field),
                                        parameters[0], parameters[1])

    def EPOCH(self, first):
        return "UNIX_TIMESTAMP(%s)" % self.expand(first)

    def CONCAT(self, *items):
        return 'CONCAT(%s)' % ','.join(self.expand(x,'string') for x in items)

    def REGEXP(self,first,second):
        return '(%s REGEXP %s)' % (self.expand(first),
                                   self.expand(second,'string'))

    def _drop(self,table,mode):
        # breaks db integrity but without this mysql does not drop table
        return ['SET FOREIGN_KEY_CHECKS=0;','DROP TABLE %s;' % table,
                'SET FOREIGN_KEY_CHECKS=1;']

    def _insert_empty(self, table):
        return 'INSERT INTO %s VALUES (DEFAULT);' % table

    # Distributed (two-phase) transactions via MySQL XA statements.
    def distributed_transaction_begin(self,key):
        self.execute('XA START;')

    def prepare(self,key):
        self.execute("XA END;")
        self.execute("XA PREPARE;")

    def commit_prepared(self,key):
        # fixed: the parameter was misspelled 'ley', which broke any
        # keyword-style caller and diverged from the sibling XA methods
        self.execute("XA COMMIT;")

    def rollback_prepared(self,key):
        self.execute("XA ROLLBACK;")

    REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>[^?]+)(\?set_encoding=(?P<charset>\w+))?$')

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        """Parse a mysql:// URI and prepare (optionally open) the connection.

        URI form: mysql://user:password@host:port/db?set_encoding=charset
        """
        self.db = db
        self.dbengine = "mysql"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args,uri)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        ruri = uri.split('://',1)[1]
        m = self.REGEX_URI.match(ruri)
        if not m:
            raise SyntaxError(
                "Invalid URI string in DAL: %s" % self.uri)
        user = credential_decoder(m.group('user'))
        if not user:
            raise SyntaxError('User required')
        password = credential_decoder(m.group('password'))
        if not password:
            password = ''
        host = m.group('host')
        if not host:
            raise SyntaxError('Host name required')
        db = m.group('db')
        if not db:
            raise SyntaxError('Database name required')
        port = int(m.group('port') or '3306')
        charset = m.group('charset') or 'utf8'
        # fixed: user/password were previously passed through
        # credential_decoder a *second* time here, corrupting
        # credentials whenever the decoder is not idempotent
        driver_args.update(db=db,
                           user=user,
                           passwd=password,
                           host=host,
                           port=port,
                           charset=charset)

        def connector(driver_args=driver_args):
            return self.driver.connect(**driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def after_connection(self):
        # re-enable FK checks (a prior _drop disables them) and stop
        # MySQL from treating backslash as an escape character
        self.execute('SET FOREIGN_KEY_CHECKS=1;')
        self.execute("SET sql_mode='NO_BACKSLASH_ESCAPES';")

    def lastrowid(self,table):
        self.execute('select last_insert_id();')
        return int(self.cursor.fetchone()[0])
2567
class PostgreSQLAdapter(BaseAdapter):
    # Adapter for PostgreSQL (psycopg2 or pg8000 drivers), including
    # PostGIS geometry/geography support and two-phase commit.
    drivers = ('psycopg2','pg8000')

    support_distributed_transaction = True
    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR(%(length)s)',
        'text': 'TEXT',
        'json': 'TEXT',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'BYTEA',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INTEGER',
        'bigint': 'BIGINT',
        'float': 'FLOAT',
        'double': 'FLOAT8',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'TIME',
        'datetime': 'TIMESTAMP',
        'id': 'SERIAL PRIMARY KEY',
        'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'TEXT',
        'list:string': 'TEXT',
        'list:reference': 'TEXT',
        'geometry': 'GEOMETRY',
        'geography': 'GEOGRAPHY',
        'big-id': 'BIGSERIAL PRIMARY KEY',
        'big-reference': 'BIGINT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s',
        }

    QUOTE_TEMPLATE = '%s'

    def varquote(self,name):
        # quote identifiers only when they need it (mixed case, keywords)
        return varquote_aux(name,'"%s"')

    def adapt(self,obj):
        # escape/quote a literal using the driver's own machinery when
        # available (psycopg2); otherwise by doubling single quotes
        if self.driver_name == 'psycopg2':
            return psycopg2_adapt(obj).getquoted()
        elif self.driver_name == 'pg8000':
            # pg8000 uses %-style params, so literal % must be doubled
            return "'%s'" % str(obj).replace("%","%%").replace("'","''")
        else:
            return "'%s'" % str(obj).replace("'","''")

    def sequence_name(self,table):
        # name of the implicit sequence backing the SERIAL id column
        return '%s_id_Seq' % table

    def RANDOM(self):
        return 'RANDOM()'

    def ADD(self, first, second):
        # '+' maps to string concatenation (||) for text-like types
        t = first.type
        if t in ('text','string','password', 'json', 'upload','blob'):
            return '(%s || %s)' % (self.expand(first), self.expand(second, t))
        else:
            return '(%s + %s)' % (self.expand(first), self.expand(second, t))

    # two-phase commit: PREPARE TRANSACTION / COMMIT|ROLLBACK PREPARED
    def distributed_transaction_begin(self,key):
        return

    def prepare(self,key):
        self.execute("PREPARE TRANSACTION '%s';" % key)

    def commit_prepared(self,key):
        self.execute("COMMIT PREPARED '%s';" % key)

    def rollback_prepared(self,key):
        self.execute("ROLLBACK PREPARED '%s';" % key)

    def create_sequence_and_triggers(self, query, table, **args):
        # following lines should only be executed if table._sequence_name does not exist
        # self.execute('CREATE SEQUENCE %s;' % table._sequence_name)
        # self.execute("ALTER TABLE %s ALTER COLUMN %s SET DEFAULT NEXTVAL('%s');" \
        #              % (table._tablename, table._fieldname, table._sequence_name))
        self.execute(query)

    REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:@]+)(\:(?P<port>[0-9]+))?/(?P<db>[^\?]+)(\?sslmode=(?P<sslmode>.+))?$')

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, srid=4326,
                 after_connection=None):
        self.db = db
        self.dbengine = "postgres"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args,uri)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        # spatial reference id for geometry columns (PostGIS)
        self.srid = srid
        self.find_or_make_work_folder()
        ruri = uri.split('://',1)[1]
        m = self.REGEX_URI.match(ruri)
        if not m:
            raise SyntaxError("Invalid URI string in DAL")
        user = credential_decoder(m.group('user'))
        if not user:
            raise SyntaxError('User required')
        password = credential_decoder(m.group('password'))
        if not password:
            password = ''
        host = m.group('host')
        if not host:
            raise SyntaxError('Host name required')
        db = m.group('db')
        if not db:
            raise SyntaxError('Database name required')
        port = m.group('port') or '5432'
        sslmode = m.group('sslmode')
        # build a libpq-style connection string
        if sslmode:
            msg = ("dbname='%s' user='%s' host='%s' "
                   "port=%s password='%s' sslmode='%s'") \
                   % (db, user, host, port, password, sslmode)
        else:
            msg = ("dbname='%s' user='%s' host='%s' "
                   "port=%s password='%s'") \
                   % (db, user, host, port, password)
        # choose diver according uri
        if self.driver:
            self.__version__ = "%s %s" % (self.driver.__name__,
                                          self.driver.__version__)
        else:
            self.__version__ = None
        def connector(msg=msg,driver_args=driver_args):
            return self.driver.connect(msg,**driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def after_connection(self):
        self.connection.set_client_encoding('UTF8')
        self.execute("SET standard_conforming_strings=on;")
        self.try_json()

    def lastrowid(self,table):
        # read the current value of the table's id sequence
        self.execute("select currval('%s')" % table._sequence_name)
        return int(self.cursor.fetchone()[0])

    def try_json(self):
        # check JSON data type support
        # (to be added to after_connection)
        if self.driver_name == "pg8000":
            supports_json = self.connection.server_version >= "9.2.0"
        elif (self.driver_name == "psycopg2") and \
             (self.driver.__version__ >= "2.0.12"):
            supports_json = self.connection.server_version >= 90200
        elif self.driver_name == "zxJDBC":
            supports_json = self.connection.dbversion >= "9.2.0"
        else: supports_json = None
        if supports_json:
            self.types["json"] = "JSON"
            self.native_json = True
        else: LOGGER.debug("Your database version does not support the JSON data type (using TEXT instead)")

    def LIKE(self,first,second):
        # non-text operands are compared through a CAST to CHAR
        args = (self.expand(first), self.expand(second,'string'))
        if not first.type in ('string', 'text', 'json'):
            return '(CAST(%s AS CHAR(%s)) LIKE %s)' % (args[0], first.length, args[1])
        else:
            return '(%s LIKE %s)' % args

    def ILIKE(self,first,second):
        # NOTE(review): the CAST branch emits case-sensitive LIKE, not
        # ILIKE, for non-text operands — confirm this is intended
        args = (self.expand(first), self.expand(second,'string'))
        if not first.type in ('string', 'text', 'json'):
            return '(CAST(%s AS CHAR(%s)) LIKE %s)' % (args[0], first.length, args[1])
        else:
            return '(%s ILIKE %s)' % args

    def REGEXP(self,first,second):
        return '(%s ~ %s)' % (self.expand(first),
                              self.expand(second,'string'))

    def STARTSWITH(self,first,second):
        return '(%s ILIKE %s)' % (self.expand(first),
                                  self.expand(second+'%','string'))

    def ENDSWITH(self,first,second):
        return '(%s ILIKE %s)' % (self.expand(first),
                                  self.expand('%'+second,'string'))

    # GIS functions

    def ST_ASGEOJSON(self, first, second):
        """
        http://postgis.org/docs/ST_AsGeoJSON.html
        """
        return 'ST_AsGeoJSON(%s,%s,%s,%s)' %(second['version'],
            self.expand(first), second['precision'], second['options'])

    def ST_ASTEXT(self, first):
        """
        http://postgis.org/docs/ST_AsText.html
        """
        return 'ST_AsText(%s)' %(self.expand(first))

    def ST_X(self, first):
        """
        http://postgis.org/docs/ST_X.html
        """
        return 'ST_X(%s)' %(self.expand(first))

    def ST_Y(self, first):
        """
        http://postgis.org/docs/ST_Y.html
        """
        return 'ST_Y(%s)' %(self.expand(first))

    def ST_CONTAINS(self, first, second):
        """
        http://postgis.org/docs/ST_Contains.html
        """
        return 'ST_Contains(%s,%s)' %(self.expand(first), self.expand(second, first.type))

    def ST_DISTANCE(self, first, second):
        """
        http://postgis.org/docs/ST_Distance.html
        """
        return 'ST_Distance(%s,%s)' %(self.expand(first), self.expand(second, first.type))

    def ST_EQUALS(self, first, second):
        """
        http://postgis.org/docs/ST_Equals.html
        """
        return 'ST_Equals(%s,%s)' %(self.expand(first), self.expand(second, first.type))

    def ST_INTERSECTS(self, first, second):
        """
        http://postgis.org/docs/ST_Intersects.html
        """
        return 'ST_Intersects(%s,%s)' %(self.expand(first), self.expand(second, first.type))

    def ST_OVERLAPS(self, first, second):
        """
        http://postgis.org/docs/ST_Overlaps.html
        """
        return 'ST_Overlaps(%s,%s)' %(self.expand(first), self.expand(second, first.type))

    def ST_SIMPLIFY(self, first, second):
        """
        http://postgis.org/docs/ST_Simplify.html
        """
        return 'ST_Simplify(%s,%s)' %(self.expand(first), self.expand(second, 'double'))

    def ST_TOUCHES(self, first, second):
        """
        http://postgis.org/docs/ST_Touches.html
        """
        return 'ST_Touches(%s,%s)' %(self.expand(first), self.expand(second, first.type))

    def ST_WITHIN(self, first, second):
        """
        http://postgis.org/docs/ST_Within.html
        """
        return 'ST_Within(%s,%s)' %(self.expand(first), self.expand(second, first.type))

    def represent(self, obj, fieldtype):
        # geometry/geography literals become ST_GeomFromText /
        # ST_GeogFromText with the srid parsed from the declared type
        field_is_type = fieldtype.startswith
        if field_is_type('geo'):
            srid = 4326 # postGIS default srid for geometry
            geotype, parms = fieldtype[:-1].split('(')
            parms = parms.split(',')
            if len(parms) >= 2:
                schema, srid = parms[:2]
            if field_is_type('geometry'):
                value = "ST_GeomFromText('%s',%s)" %(obj, srid)
            elif field_is_type('geography'):
                value = "ST_GeogFromText('SRID=%s;%s')" %(srid, obj)
#             else:
#                 raise SyntaxError('Invalid field type %s' %fieldtype)
            return value
        return BaseAdapter.represent(self, obj, fieldtype)
2843
class NewPostgreSQLAdapter(PostgreSQLAdapter):
    """PostgreSQL adapter variant that stores list: types as native
    PostgreSQL arrays instead of bar-encoded text."""

    drivers = ('psycopg2','pg8000')

    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR(%(length)s)',
        'text': 'TEXT',
        'json': 'TEXT',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'BYTEA',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INTEGER',
        'bigint': 'BIGINT',
        'float': 'FLOAT',
        'double': 'FLOAT8',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'TIME',
        'datetime': 'TIMESTAMP',
        'id': 'SERIAL PRIMARY KEY',
        'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'BIGINT[]',
        'list:string': 'TEXT[]',
        'list:reference': 'BIGINT[]',
        'geometry': 'GEOMETRY',
        'geography': 'GEOGRAPHY',
        'big-id': 'BIGSERIAL PRIMARY KEY',
        'big-reference': 'BIGINT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        }

    def parse_list_integers(self, value, field_type):
        # native arrays come back from the driver as Python lists
        return value

    def parse_list_references(self, value, field_type):
        referee = field_type[5:]  # strip the 'list:' prefix
        return [self.parse_reference(item, referee) for item in value]

    def parse_list_strings(self, value, field_type):
        # native arrays come back from the driver as Python lists
        return value

    def represent(self, obj, fieldtype):
        """Render list: values as ARRAY[...] literals; everything else
        falls through to the base representation."""
        field_is_type = fieldtype.startswith
        if not field_is_type('list:'):
            return BaseAdapter.represent(self, obj, fieldtype)
        if not obj:
            items = []
        elif isinstance(obj, (list, tuple)):
            items = obj
        else:
            items = [obj]
        if field_is_type('list:string'):
            items = map(str, items)
        else:
            items = map(int, items)
        return 'ARRAY[%s]' % ','.join(repr(item) for item in items)
2896
class JDBCPostgreSQLAdapter(PostgreSQLAdapter):
    """PostgreSQL adapter for Jython, connecting through zxJDBC.

    Inherits the full PostgreSQL SQL dialect from PostgreSQLAdapter;
    only connection establishment and post-connect setup differ.
    """
    drivers = ('zxJDBC',)

    # URI shape: user[:password]@host[:port]/dbname
    REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>.+)$')

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None ):
        """Parse the connection URI and prepare (optionally open) a
        JDBC connection.

        Raises:
            SyntaxError: when the URI lacks a user, host or database name,
                or does not match REGEX_URI at all.
        """
        self.db = db
        self.dbengine = "postgres"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args,uri)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        # strip the 'scheme://' prefix before matching
        ruri = uri.split('://',1)[1]
        m = self.REGEX_URI.match(ruri)
        if not m:
            raise SyntaxError("Invalid URI string in DAL")
        user = credential_decoder(m.group('user'))
        if not user:
            raise SyntaxError('User required')
        password = credential_decoder(m.group('password'))
        if not password:
            password = ''
        host = m.group('host')
        if not host:
            raise SyntaxError('Host name required')
        db = m.group('db')
        if not db:
            raise SyntaxError('Database name required')
        port = m.group('port') or '5432'
        # positional args for zxJDBC connect(): (jdbc-url, user, password)
        msg = ('jdbc:postgresql://%s:%s/%s' % (host, port, db), user, password)
        def connector(msg=msg,driver_args=driver_args):
            # default args freeze current values so reconnect() can re-call
            return self.driver.connect(*msg,**driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def after_connection(self):
        # Session setup executed after every (re)connect.
        self.connection.set_client_encoding('UTF8')
        self.execute('BEGIN;')
        self.execute("SET CLIENT_ENCODING TO 'UNICODE';")
        # presumably probes/enables json support; defined on the parent
        # adapter (not visible in this chunk) -- confirm
        self.try_json()
2943
class OracleAdapter(BaseAdapter):
    """Adapter for Oracle via the cx_Oracle driver.

    Oracle (pre-12c) has no autoincrement columns or LIMIT clause, so
    this adapter emulates ids with a per-table sequence plus a BEFORE
    INSERT trigger (create_sequence_and_triggers) and pagination with
    nested ROWNUM subqueries (select_limitby).
    """
    drivers = ('cx_Oracle',)

    # Oracle implicitly commits on DDL; callers must not expect rollback
    # to undo an ALTER TABLE.
    commit_on_alter_table = False
    # Mapping of DAL field types to Oracle column type declarations.
    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR2(%(length)s)',
        'text': 'CLOB',
        'json': 'CLOB',
        'password': 'VARCHAR2(%(length)s)',
        'blob': 'CLOB',
        'upload': 'VARCHAR2(%(length)s)',
        'integer': 'INT',
        'bigint': 'NUMBER',
        'float': 'FLOAT',
        'double': 'BINARY_DOUBLE',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'CHAR(8)',
        'datetime': 'DATE',
        'id': 'NUMBER PRIMARY KEY',
        'reference': 'NUMBER, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'CLOB',
        'list:string': 'CLOB',
        'list:reference': 'CLOB',
        'big-id': 'NUMBER PRIMARY KEY',
        'big-reference': 'NUMBER, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s',
        }

    def sequence_name(self,tablename):
        # Name of the per-table sequence backing the id column.
        return '%s_sequence' % tablename

    def trigger_name(self,tablename):
        # Name of the BEFORE INSERT trigger that populates the id column.
        return '%s_trigger' % tablename

    def LEFT_JOIN(self):
        return 'LEFT OUTER JOIN'

    def RANDOM(self):
        # Oracle's random generator lives in the dbms_random package.
        return 'dbms_random.value'

    def NOT_NULL(self,default,field_type):
        return 'DEFAULT %s NOT NULL' % self.represent(default,field_type)

    def _drop(self,table,mode):
        # Dropping a table must also drop its id sequence.
        sequence_name = table._sequence_name
        return ['DROP TABLE %s %s;' % (table, mode), 'DROP SEQUENCE %s;' % sequence_name]

    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        """Emulate LIMIT/OFFSET with nested ROWNUM subqueries.

        The inner query caps rows at lmax via ROWNUM; the outer query
        skips the first lmin rows via the synthesized w_row column.
        """
        if limitby:
            (lmin, lmax) = limitby
            # append to an existing WHERE, or start one
            if len(sql_w) > 1:
                sql_w_row = sql_w + ' AND w_row > %i' % lmin
            else:
                sql_w_row = 'WHERE w_row > %i' % lmin
            return 'SELECT %s %s FROM (SELECT w_tmp.*, ROWNUM w_row FROM (SELECT %s FROM %s%s%s) w_tmp WHERE ROWNUM<=%i) %s %s %s;' % (sql_s, sql_f, sql_f, sql_t, sql_w, sql_o, lmax, sql_t, sql_w_row, sql_o)
        return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)

    def constraint_name(self, tablename, fieldname):
        # Oracle identifiers are limited to 30 characters; shorten the
        # default constraint name when it would exceed that.
        constraint_name = BaseAdapter.constraint_name(self, tablename, fieldname)
        if len(constraint_name)>30:
            constraint_name = '%s_%s__constraint' % (tablename[:10], fieldname[:7])
        return constraint_name

    def represent_exceptions(self, obj, fieldtype):
        """Oracle-specific literal rendering for blob/date/datetime.

        Returns None for all other field types so the caller falls back
        to the generic representation.
        """
        if fieldtype == 'blob':
            obj = base64.b64encode(str(obj))
            # pseudo-literal later rewritten into a bind variable by execute()
            return ":CLOB('%s')" % obj
        elif fieldtype == 'date':
            if isinstance(obj, (datetime.date, datetime.datetime)):
                obj = obj.isoformat()[:10]
            else:
                obj = str(obj)
            return "to_date('%s','yyyy-mm-dd')" % obj
        elif fieldtype == 'datetime':
            if isinstance(obj, datetime.datetime):
                obj = obj.isoformat()[:19].replace('T',' ')
            elif isinstance(obj, datetime.date):
                obj = obj.isoformat()[:10]+' 00:00:00'
            else:
                obj = str(obj)
            return "to_date('%s','yyyy-mm-dd hh24:mi:ss')" % obj
        return None

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        """Prepare (and optionally open) a cx_Oracle connection.

        The part of the URI after 'oracle://' is passed verbatim to
        cx_Oracle.connect as its DSN.
        """
        self.db = db
        self.dbengine = "oracle"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args,uri)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        ruri = uri.split('://',1)[1]
        # cx_Oracle connections are threaded by default here
        if not 'threaded' in driver_args:
            driver_args['threaded']=True
        def connector(uri=ruri,driver_args=driver_args):
            return self.driver.connect(uri,**driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def after_connection(self):
        # Fix session date formats so literal to_date()-free comparisons work.
        self.execute("ALTER SESSION SET NLS_DATE_FORMAT = 'YYYY-MM-DD HH24:MI:SS';")
        self.execute("ALTER SESSION SET NLS_TIMESTAMP_FORMAT = 'YYYY-MM-DD HH24:MI:SS';")

    # Matches the next ":CLOB('...')" pseudo-literal (outside quoted strings)
    # produced by represent_exceptions, so execute() can rewrite it.
    oracle_fix = re.compile("[^']*('[^']*'[^']*)*\:(?P<clob>CLOB\('([^']+|'')*'\))")

    def execute(self, command, args=None):
        """Execute *command*, rewriting inline :CLOB('...') pseudo-literals
        into numbered bind variables (:1, :2, ...) with their contents
        appended to *args*, since CLOB data cannot be inlined in SQL.
        """
        args = args or []
        i = 1
        while True:
            m = self.oracle_fix.match(command)
            if not m:
                break
            # replace the CLOB(...) token with the bind position number
            command = command[:m.start('clob')] + str(i) + command[m.end('clob'):]
            # strip "CLOB('" prefix and "')" suffix; un-escape quotes
            args.append(m.group('clob')[6:-2].replace("''", "'"))
            i += 1
        # Oracle rejects a trailing semicolon on plain statements
        if command[-1:]==';':
            command = command[:-1]
        return self.log_execute(command, args)

    def create_sequence_and_triggers(self, query, table, **args):
        """Run the CREATE TABLE *query*, then create the id sequence and a
        BEFORE INSERT trigger that assigns ids and keeps the sequence in
        sync when an explicit id is inserted.
        """
        tablename = table._tablename
        id_name = table._id.name
        sequence_name = table._sequence_name
        trigger_name = table._trigger_name
        self.execute(query)
        self.execute('CREATE SEQUENCE %s START WITH 1 INCREMENT BY 1 NOMAXVALUE MINVALUE -1;' % sequence_name)
        self.execute("""
CREATE OR REPLACE TRIGGER %(trigger_name)s BEFORE INSERT ON %(tablename)s FOR EACH ROW
DECLARE
    curr_val NUMBER;
    diff_val NUMBER;
    PRAGMA autonomous_transaction;
BEGIN
    IF :NEW.%(id)s IS NOT NULL THEN
        EXECUTE IMMEDIATE 'SELECT %(sequence_name)s.nextval FROM dual' INTO curr_val;
        diff_val := :NEW.%(id)s - curr_val - 1;
        IF diff_val != 0 THEN
          EXECUTE IMMEDIATE 'alter sequence %(sequence_name)s increment by '|| diff_val;
          EXECUTE IMMEDIATE 'SELECT %(sequence_name)s.nextval FROM dual' INTO curr_val;
          EXECUTE IMMEDIATE 'alter sequence %(sequence_name)s increment by 1';
        END IF;
    END IF;
    SELECT %(sequence_name)s.nextval INTO :NEW.%(id)s FROM DUAL;
END;
""" % dict(trigger_name=trigger_name, tablename=tablename,
           sequence_name=sequence_name,id=id_name))

    def lastrowid(self,table):
        # currval of the table's sequence is the id just inserted by the trigger.
        sequence_name = table._sequence_name
        self.execute('SELECT %s.currval FROM dual;' % sequence_name)
        return long(self.cursor.fetchone()[0])

    #def parse_value(self, value, field_type, blob_decode=True):
    #    if blob_decode and isinstance(value, cx_Oracle.LOB):
    #        try:
    #            value = value.read()
    #        except self.driver.ProgrammingError:
    #            # After a subsequent fetch the LOB value is not valid anymore
    #            pass
    #    return BaseAdapter.parse_value(self, value, field_type, blob_decode)

    def _fetchall(self):
        # LOB columns must be read eagerly: their handles expire after the
        # next fetch. NOTE(review): references the cx_Oracle module
        # directly rather than self.driver -- confirm it is imported.
        if any(x[1]==cx_Oracle.CLOB for x in self.cursor.description):
            return [tuple([(c.read() if type(c) == cx_Oracle.LOB else c) \
                               for c in r]) for r in self.cursor]
        else:
            return self.cursor.fetchall()
3119
class MSSQLAdapter(BaseAdapter):
    """Adapter for Microsoft SQL Server via pyodbc.

    Covers T-SQL peculiarities: [bracket] identifier quoting, TOP-based
    limits (the offset is applied client-side in rowslice), DATEPART
    extraction, and the geometry/geography spatial methods.
    """
    drivers = ('pyodbc',)
    T_SEP = 'T'  # separator used inside datetime literals

    QUOTE_TEMPLATE = "[%s]"  # T-SQL quotes identifiers with brackets

    # Mapping of DAL field types to T-SQL column type declarations.
    types = {
        'boolean': 'BIT',
        'string': 'VARCHAR(%(length)s)',
        'text': 'TEXT',
        'json': 'TEXT',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'IMAGE',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INT',
        'bigint': 'BIGINT',
        'float': 'FLOAT',
        'double': 'FLOAT',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATETIME',
        'time': 'CHAR(8)',
        'datetime': 'DATETIME',
        'id': 'INT IDENTITY PRIMARY KEY',
        'reference': 'INT NULL, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'TEXT',
        'list:string': 'TEXT',
        'list:reference': 'TEXT',
        'geometry': 'geometry',
        'geography': 'geography',
        'big-id': 'BIGINT IDENTITY PRIMARY KEY',
        'big-reference': 'BIGINT NULL, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s',
        }

    def concat_add(self,tablename):
        # T-SQL needs a fresh ALTER TABLE statement per added column.
        return '; ALTER TABLE %s ADD ' % tablename

    def varquote(self,name):
        return varquote_aux(name,'[%s]')

    def EXTRACT(self,field,what):
        return "DATEPART(%s,%s)" % (what, self.expand(field))

    def LEFT_JOIN(self):
        return 'LEFT OUTER JOIN'

    def RANDOM(self):
        return 'NEWID()'

    def ALLOW_NULL(self):
        return ' NULL'

    def SUBSTRING(self,field,parameters):
        return 'SUBSTRING(%s,%s,%s)' % (self.expand(field), parameters[0], parameters[1])

    def PRIMARY_KEY(self,key):
        return 'PRIMARY KEY CLUSTERED (%s)' % key

    def AGGREGATE(self, first, what):
        # T-SQL spells LENGTH as LEN
        if what == 'LENGTH':
            what = 'LEN'
        return "%s(%s)" % (what, self.expand(first))

    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        # Only TOP is emitted here; the offset (lmin) is applied
        # client-side by rowslice().
        if limitby:
            (lmin, lmax) = limitby
            sql_s += ' TOP %i' % lmax
        return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)

    TRUE = 1
    FALSE = 0

    REGEX_DSN = re.compile('^(?P<dsn>.+)$')
    REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>[^\?]+)(\?(?P<urlargs>.*))?$')
    REGEX_ARGPATTERN = re.compile('(?P<argkey>[^=]+)=(?P<argvalue>[^&]*)')

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, srid=4326,
                 after_connection=None):
        """Build the ODBC connection string from either a DSN-style URI
        (no '@' in it) or a user:password@host[:port]/db[?args] URI.

        Raises:
            SyntaxError: on a malformed URI or missing required parts.
        """
        self.db = db
        self.dbengine = "mssql"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args,uri)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.srid = srid  # default srid for spatial fields
        self.find_or_make_work_folder()
        # ## read: http://bytes.com/groups/python/460325-cx_oracle-utf8
        ruri = uri.split('://',1)[1]
        if '@' not in ruri:
            # no credentials: treat the rest of the URI as an ODBC DSN
            try:
                m = self.REGEX_DSN.match(ruri)
                if not m:
                    raise SyntaxError(
                        'Parsing uri string(%s) has no result' % self.uri)
                dsn = m.group('dsn')
                if not dsn:
                    raise SyntaxError('DSN required')
            except SyntaxError:
                e = sys.exc_info()[1]
                LOGGER.error('NdGpatch error')
                raise e
            # was cnxn = 'DSN=%s' % dsn
            cnxn = dsn
        else:
            m = self.REGEX_URI.match(ruri)
            if not m:
                raise SyntaxError(
                    "Invalid URI string in DAL: %s" % self.uri)
            user = credential_decoder(m.group('user'))
            if not user:
                raise SyntaxError('User required')
            password = credential_decoder(m.group('password'))
            if not password:
                password = ''
            host = m.group('host')
            if not host:
                raise SyntaxError('Host name required')
            db = m.group('db')
            if not db:
                raise SyntaxError('Database name required')
            port = m.group('port') or '1433'
            # Parse the optional url name-value arg pairs after the '?'
            # (in the form of arg1=value1&arg2=value2&...)
            # Default values (drivers like FreeTDS insist on uppercase parameter keys)
            argsdict = { 'DRIVER':'{SQL Server}' }
            urlargs = m.group('urlargs') or ''
            for argmatch in self.REGEX_ARGPATTERN.finditer(urlargs):
                argsdict[str(argmatch.group('argkey')).upper()] = argmatch.group('argvalue')
            urlargs = ';'.join(['%s=%s' % (ak, av) for (ak, av) in argsdict.iteritems()])
            cnxn = 'SERVER=%s;PORT=%s;DATABASE=%s;UID=%s;PWD=%s;%s' \
                % (host, port, db, user, password, urlargs)
        def connector(cnxn=cnxn,driver_args=driver_args):
            return self.driver.connect(cnxn,**driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def lastrowid(self,table):
        #self.execute('SELECT @@IDENTITY;')
        self.execute('SELECT SCOPE_IDENTITY();')
        return long(self.cursor.fetchone()[0])

    def rowslice(self,rows,minimum=0,maximum=None):
        # Apply the limitby offset client-side (see select_limitby).
        if maximum is None:
            return rows[minimum:]
        return rows[minimum:maximum]

    def EPOCH(self, first):
        return "DATEDIFF(second, '1970-01-01 00:00:00', %s)" % self.expand(first)

    def CONCAT(self, *items):
        return '(%s)' % ' + '.join(self.expand(x,'string') for x in items)

    # GIS Spatial Extensions

    # No STAsGeoJSON in MSSQL

    def ST_ASTEXT(self, first):
        return '%s.STAsText()' %(self.expand(first))

    def ST_CONTAINS(self, first, second):
        return '%s.STContains(%s)=1' %(self.expand(first), self.expand(second, first.type))

    def ST_DISTANCE(self, first, second):
        return '%s.STDistance(%s)' %(self.expand(first), self.expand(second, first.type))

    def ST_EQUALS(self, first, second):
        return '%s.STEquals(%s)=1' %(self.expand(first), self.expand(second, first.type))

    def ST_INTERSECTS(self, first, second):
        return '%s.STIntersects(%s)=1' %(self.expand(first), self.expand(second, first.type))

    def ST_OVERLAPS(self, first, second):
        return '%s.STOverlaps(%s)=1' %(self.expand(first), self.expand(second, first.type))

    # no STSimplify in MSSQL

    def ST_TOUCHES(self, first, second):
        return '%s.STTouches(%s)=1' %(self.expand(first), self.expand(second, first.type))

    def ST_WITHIN(self, first, second):
        return '%s.STWithin(%s)=1' %(self.expand(first), self.expand(second, first.type))

    def represent(self, obj, fieldtype):
        """Render *obj* as a T-SQL literal.

        geometry/geography values become STGeomFromText() calls with the
        srid parsed from the type declaration; everything else is
        delegated to BaseAdapter.represent.

        FIX: the geography branch previously tested
        ``fieldtype == 'geography'`` -- never true for parameterized
        types such as 'geography(public,4326)' (which fell through to
        the generic representation) and a guaranteed ValueError on the
        exact match, since 'geograph'.split('(') cannot unpack into two
        names.  It also contained an unreachable duplicated return.
        Both spatial branches now share the same parsing and tolerate a
        type declaration without a '(...)' suffix.
        """
        field_is_type = fieldtype.startswith
        if field_is_type('geometry'):
            srid = 0 # MS SQL default srid for geometry
            if '(' in fieldtype:
                geotype, parms = fieldtype[:-1].split('(')
                if parms:
                    srid = parms
            return "geometry::STGeomFromText('%s',%s)" %(obj, srid)
        elif field_is_type('geography'):
            srid = 4326 # MS SQL default srid for geography
            if '(' in fieldtype:
                geotype, parms = fieldtype[:-1].split('(')
                if parms:
                    srid = parms
            return "geography::STGeomFromText('%s',%s)" %(obj, srid)
        return BaseAdapter.represent(self, obj, fieldtype)
3326
class MSSQL3Adapter(MSSQLAdapter):
    """Experimental support for pagination in MSSQL (2005+).

    Uses TOP when no offset is requested, and a ROW_NUMBER() window
    function otherwise; rowslice becomes a no-op because rows are
    already sliced server-side.
    """
    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        """Return a SELECT honouring limitby=(lmin, lmax) server-side.

        FIX: removed the unused local ``sql_o_inner``.
        """
        if limitby:
            (lmin, lmax) = limitby
            if lmin == 0:
                # no offset needed: a plain TOP suffices
                sql_s += ' TOP %i' % lmax
                return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)
            lmin += 1  # BETWEEN is inclusive and ROW_NUMBER() starts at 1
            # NOTE(review): assumes sql_o contains 'ORDER BY '; if absent,
            # find() returns -1 and sql_g_inner drops the last character --
            # confirm callers always pass an ORDER BY for offset queries.
            sql_g_inner = sql_o[:sql_o.find('ORDER BY ')]
            # alias each selected column f_0, f_1, ... so the outer query
            # can re-select them without repeating expressions
            sql_f_outer = ['f_%s' % f for f in range(len(sql_f.split(',')))]
            sql_f_inner = [f for f in sql_f.split(',')]
            sql_f_iproxy = ['%s AS %s' % (o, n) for (o, n) in zip(sql_f_inner, sql_f_outer)]
            sql_f_iproxy = ', '.join(sql_f_iproxy)
            sql_f_oproxy = ', '.join(sql_f_outer)
            return 'SELECT %s %s FROM (SELECT %s ROW_NUMBER() OVER (ORDER BY %s) AS w_row, %s FROM %s%s%s) TMP WHERE w_row BETWEEN %i AND %s;' % (sql_s,sql_f_oproxy,sql_s,sql_f,sql_f_iproxy,sql_t,sql_w,sql_g_inner,lmin,lmax)
        return 'SELECT %s %s FROM %s%s%s;' % (sql_s,sql_f,sql_t,sql_w,sql_o)

    def rowslice(self,rows,minimum=0,maximum=None):
        # Slicing already happened in SQL; return rows untouched.
        return rows
3348
class MSSQL2Adapter(MSSQLAdapter):
    """MSSQL adapter variant storing text in national (Unicode)
    column types, with literals prefixed N'...'."""
    drivers = ('pyodbc',)

    types = {
        'boolean': 'CHAR(1)',
        'string': 'NVARCHAR(%(length)s)',
        'text': 'NTEXT',
        'json': 'NTEXT',
        'password': 'NVARCHAR(%(length)s)',
        'blob': 'IMAGE',
        'upload': 'NVARCHAR(%(length)s)',
        'integer': 'INT',
        'bigint': 'BIGINT',
        'float': 'FLOAT',
        'double': 'FLOAT',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATETIME',
        'time': 'CHAR(8)',
        'datetime': 'DATETIME',
        'id': 'INT IDENTITY PRIMARY KEY',
        'reference': 'INT, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'NTEXT',
        'list:string': 'NTEXT',
        'list:reference': 'NTEXT',
        'big-id': 'BIGINT IDENTITY PRIMARY KEY',
        'big-reference': 'BIGINT, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s',
        }

    def represent(self, obj, fieldtype):
        """Prefix quoted text literals with N so they are treated as
        national (Unicode) strings by SQL Server."""
        rendered = BaseAdapter.represent(self, obj, fieldtype)
        is_text = fieldtype in ('string', 'text', 'json')
        if is_text and rendered[:1] == "'":
            return 'N' + rendered
        return rendered

    def execute(self, a):
        """Decode the SQL command to unicode before logging/executing."""
        command = a.decode('utf8')
        return self.log_execute(command)
3388
class VerticaAdapter(MSSQLAdapter):
    """Adapter for HP Vertica (via pyodbc), reusing the MSSQL dialect
    but with native LIMIT/OFFSET pagination."""
    drivers = ('pyodbc',)
    T_SEP = ' '  # Vertica datetime literals use a space separator

    types = {
        'boolean': 'BOOLEAN',
        'string': 'VARCHAR(%(length)s)',
        'text': 'BYTEA',
        'json': 'VARCHAR(%(length)s)',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'BYTEA',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INT',
        'bigint': 'BIGINT',
        'float': 'FLOAT',
        'double': 'DOUBLE PRECISION',
        'decimal': 'DECIMAL(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'TIME',
        'datetime': 'DATETIME',
        'id': 'IDENTITY',
        'reference': 'INT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'BYTEA',
        'list:string': 'BYTEA',
        'list:reference': 'BYTEA',
        'big-reference': 'BIGINT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        }

    def EXTRACT(self, first, what):
        """Extract a date part via Vertica's DATE_PART function."""
        expr = self.expand(first)
        return "DATE_PART('%s', TIMESTAMP %s)" % (what, expr)

    def _truncate(self, table, mode=''):
        """Return the TRUNCATE statement(s) for *table*."""
        suffix = mode or ''
        return ['TRUNCATE %s %s;' % (table._tablename, suffix)]

    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        """Apply native LIMIT/OFFSET pagination when limitby is given."""
        if limitby:
            lmin, lmax = limitby
            sql_o = '%s LIMIT %i OFFSET %i' % (sql_o, lmax - lmin, lmin)
        return 'SELECT %s %s FROM %s%s%s;' % \
            (sql_s, sql_f, sql_t, sql_w, sql_o)

    def lastrowid(self, table):
        """Return the id generated by the most recent insert."""
        self.execute('SELECT LAST_INSERT_ID();')
        return long(self.cursor.fetchone()[0])

    def execute(self, a):
        # No decoding needed (unlike MSSQL2Adapter); log and run as-is.
        return self.log_execute(a)
3438
class SybaseAdapter(MSSQLAdapter):
    """Adapter for Sybase, reusing the MSSQL dialect with a Sybase DSN."""
    drivers = ('Sybase',)

    types = {
        'boolean': 'BIT',
        'string': 'CHAR VARYING(%(length)s)',
        'text': 'TEXT',
        'json': 'TEXT',
        'password': 'CHAR VARYING(%(length)s)',
        'blob': 'IMAGE',
        'upload': 'CHAR VARYING(%(length)s)',
        'integer': 'INT',
        'bigint': 'BIGINT',
        'float': 'FLOAT',
        'double': 'FLOAT',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATETIME',
        'time': 'CHAR(8)',
        'datetime': 'DATETIME',
        'id': 'INT IDENTITY PRIMARY KEY',
        'reference': 'INT NULL, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'TEXT',
        'list:string': 'TEXT',
        'list:reference': 'TEXT',
        'geometry': 'geometry',
        'geography': 'geography',
        'big-id': 'BIGINT IDENTITY PRIMARY KEY',
        'big-reference': 'BIGINT NULL, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s',
        }

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, srid=4326,
                 after_connection=None):
        """Build a Sybase DSN from either a DSN-style URI (no '@') or a
        user:password@host[:port]/db URI.

        Raises:
            SyntaxError: on a malformed URI or missing required parts.
        """
        self.db = db
        self.dbengine = "sybase"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args,uri)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.srid = srid
        self.find_or_make_work_folder()
        # ## read: http://bytes.com/groups/python/460325-cx_oracle-utf8
        ruri = uri.split('://',1)[1]
        if '@' not in ruri:
            # no credentials: treat the rest of the URI as a DSN
            try:
                m = self.REGEX_DSN.match(ruri)
                if not m:
                    raise SyntaxError(
                        'Parsing uri string(%s) has no result' % self.uri)
                dsn = m.group('dsn')
                if not dsn:
                    raise SyntaxError('DSN required')
            except SyntaxError:
                e = sys.exc_info()[1]
                LOGGER.error('NdGpatch error')
                raise e
        else:
            # FIX: match against the scheme-stripped ruri, not the full uri;
            # matching the full uri made REGEX_URI parse 'sybase' as the
            # user and '//...' as the password (every sibling adapter
            # matches ruri).
            m = self.REGEX_URI.match(ruri)
            if not m:
                raise SyntaxError(
                    "Invalid URI string in DAL: %s" % self.uri)
            user = credential_decoder(m.group('user'))
            if not user:
                raise SyntaxError('User required')
            password = credential_decoder(m.group('password'))
            if not password:
                password = ''
            host = m.group('host')
            if not host:
                raise SyntaxError('Host name required')
            db = m.group('db')
            if not db:
                raise SyntaxError('Database name required')
            port = m.group('port') or '1433'

            dsn = 'sybase:host=%s:%s;dbname=%s' % (host,port,db)

            driver_args.update(user = credential_decoder(user),
                               password = credential_decoder(password))

        def connector(dsn=dsn,driver_args=driver_args):
            return self.driver.connect(dsn,**driver_args)
        self.connector = connector
        if do_connect: self.reconnect()
3529
class FireBirdAdapter(BaseAdapter):
    """Adapter for Firebird databases (several drivers supported).

    Firebird has no autoincrement columns: ids come from a "generator"
    (sequence) plus a BEFORE INSERT trigger created by
    create_sequence_and_triggers.
    """
    drivers = ('kinterbasdb','firebirdsql','fdb','pyodbc')

    # DDL behaves like other statements; no implicit commit needed.
    commit_on_alter_table = False
    support_distributed_transaction = True
    # Mapping of DAL field types to Firebird column type declarations.
    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR(%(length)s)',
        'text': 'BLOB SUB_TYPE 1',
        'json': 'BLOB SUB_TYPE 1',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'BLOB SUB_TYPE 0',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INTEGER',
        'bigint': 'BIGINT',
        'float': 'FLOAT',
        'double': 'DOUBLE PRECISION',
        'decimal': 'DECIMAL(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'TIME',
        'datetime': 'TIMESTAMP',
        'id': 'INTEGER PRIMARY KEY',
        'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'BLOB SUB_TYPE 1',
        'list:string': 'BLOB SUB_TYPE 1',
        'list:reference': 'BLOB SUB_TYPE 1',
        'big-id': 'BIGINT PRIMARY KEY',
        'big-reference': 'BIGINT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        }

    def sequence_name(self,tablename):
        # Firebird calls sequences "generators".
        return 'genid_%s' % tablename

    def trigger_name(self,tablename):
        # BEFORE INSERT trigger that assigns the id from the generator.
        return 'trg_id_%s' % tablename

    def RANDOM(self):
        return 'RAND()'

    def EPOCH(self, first):
        return "DATEDIFF(second, '1970-01-01 00:00:00', %s)" % self.expand(first)

    def NOT_NULL(self,default,field_type):
        return 'DEFAULT %s NOT NULL' % self.represent(default,field_type)

    def SUBSTRING(self,field,parameters):
        return 'SUBSTRING(%s from %s for %s)' % (self.expand(field), parameters[0], parameters[1])

    def LENGTH(self, first):
        return "CHAR_LENGTH(%s)" % self.expand(first)

    def CONTAINS(self,first,second,case_sensitive=False):
        # For list: fields the stored form is |item|item|...; wrap the
        # needle in bars (escaping embedded bars) so only whole items match.
        if first.type.startswith('list:'):
            second = Expression(None,self.CONCAT('|',Expression(
                        None,self.REPLACE(second,('|','||'))),'|'))
        return '(%s CONTAINING %s)' % (self.expand(first),
                                       self.expand(second, 'string'))

    def _drop(self,table,mode):
        # Dropping a table must also drop its id generator.
        sequence_name = table._sequence_name
        return ['DROP TABLE %s %s;' % (table, mode), 'DROP GENERATOR %s;' % sequence_name]

    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        # Firebird pagination: FIRST <count> SKIP <offset> after SELECT.
        if limitby:
            (lmin, lmax) = limitby
            sql_s = ' FIRST %i SKIP %i %s' % (lmax - lmin, lmin, sql_s)
        return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)

    def _truncate(self,table,mode = ''):
        # Firebird has no TRUNCATE; delete all rows and reset the generator.
        return ['DELETE FROM %s;' % table._tablename,
                'SET GENERATOR %s TO 0;' % table._sequence_name]

    # URI shape: user[:password]@host[:port]/db[?set_encoding=charset]
    REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>.+?)(\?set_encoding=(?P<charset>\w+))?$')

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        """Parse the connection URI and prepare (optionally open) the
        Firebird connection.

        Raises:
            SyntaxError: when the URI lacks a user, host or database name.
        """
        self.db = db
        self.dbengine = "firebird"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args,uri)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        # strip the 'scheme://' prefix before matching
        ruri = uri.split('://',1)[1]
        m = self.REGEX_URI.match(ruri)
        if not m:
            raise SyntaxError("Invalid URI string in DAL: %s" % self.uri)
        user = credential_decoder(m.group('user'))
        if not user:
            raise SyntaxError('User required')
        password = credential_decoder(m.group('password'))
        if not password:
            password = ''
        host = m.group('host')
        if not host:
            raise SyntaxError('Host name required')
        port = int(m.group('port') or 3050)  # 3050 is the Firebird default
        db = m.group('db')
        if not db:
            raise SyntaxError('Database name required')
        charset = m.group('charset') or 'UTF8'
        # all drivers accept connect(**kwargs) with a dsn of host/port:db
        driver_args.update(dsn='%s/%s:%s' % (host,port,db),
                           user = credential_decoder(user),
                           password = credential_decoder(password),
                           charset = charset)

        def connector(driver_args=driver_args):
            return self.driver.connect(**driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def create_sequence_and_triggers(self, query, table, **args):
        """Run the CREATE TABLE *query*, then create the id generator and
        the BEFORE INSERT trigger that assigns ids from it (only when the
        inserted row does not supply an explicit id).
        """
        tablename = table._tablename
        sequence_name = table._sequence_name
        trigger_name = table._trigger_name
        self.execute(query)
        self.execute('create generator %s;' % sequence_name)
        self.execute('set generator %s to 0;' % sequence_name)
        self.execute('create trigger %s for %s active before insert position 0 as\nbegin\nif(new.id is null) then\nbegin\nnew.id = gen_id(%s, 1);\nend\nend;' % (trigger_name, tablename, sequence_name))

    def lastrowid(self,table):
        # gen_id(seq, 0) reads the generator's current value without
        # incrementing it: the id assigned by the insert trigger.
        sequence_name = table._sequence_name
        self.execute('SELECT gen_id(%s, 0) FROM rdb$database' % sequence_name)
        return long(self.cursor.fetchone()[0])
3658
class FireBirdEmbeddedAdapter(FireBirdAdapter):
    """Firebird adapter for embedded (local file) databases: the URI
    carries a filesystem path instead of host/port/db."""
    drivers = ('kinterbasdb','firebirdsql','fdb','pyodbc')

    # URI shape: user[:password]@/path/to/db[?set_encoding=charset]
    REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<path>[^\?]+)(\?set_encoding=(?P<charset>\w+))?$')

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        """Parse the embedded-database URI and prepare (optionally open)
        the connection.

        Raises:
            SyntaxError: when the URI lacks a user or a database path.
        """
        self.db = db
        self.dbengine = "firebird"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args,uri)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        # strip the 'scheme://' prefix before matching
        ruri = uri.split('://',1)[1]
        m = self.REGEX_URI.match(ruri)
        if not m:
            raise SyntaxError(
                "Invalid URI string in DAL: %s" % self.uri)
        user = credential_decoder(m.group('user'))
        if not user:
            raise SyntaxError('User required')
        password = credential_decoder(m.group('password'))
        if not password:
            password = ''
        pathdb = m.group('path')
        if not pathdb:
            raise SyntaxError('Path required')
        charset = m.group('charset')
        if not charset:
            charset = 'UTF8'
        # embedded mode: empty host, database is a filesystem path
        host = ''
        driver_args.update(host=host,
                           database=pathdb,
                           user=credential_decoder(user),
                           password=credential_decoder(password),
                           charset=charset)

        def connector(driver_args=driver_args):
            return self.driver.connect(**driver_args)
        self.connector = connector
        if do_connect: self.reconnect()
3705
class InformixAdapter(BaseAdapter):
    """DAL adapter for Informix 9+ via the `informixdb` driver.

    URI form: informix://user:password@host/database
    """
    drivers = ('informixdb',)

    # Mapping of DAL field types to Informix column-DDL fragments.
    # %(...)s placeholders are filled in by BaseAdapter when building DDL.
    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR(%(length)s)',
        'text': 'BLOB SUB_TYPE 1',
        'json': 'BLOB SUB_TYPE 1',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'BLOB SUB_TYPE 0',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INTEGER',
        'bigint': 'BIGINT',
        'float': 'FLOAT',
        'double': 'DOUBLE PRECISION',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'CHAR(8)',
        'datetime': 'DATETIME',
        'id': 'SERIAL',
        'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'BLOB SUB_TYPE 1',
        'list:string': 'BLOB SUB_TYPE 1',
        'list:reference': 'BLOB SUB_TYPE 1',
        'big-id': 'BIGSERIAL',
        'big-reference': 'BIGINT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference FK': 'REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s CONSTRAINT FK_%(table_name)s_%(field_name)s',
        'reference TFK': 'FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s CONSTRAINT TFK_%(table_name)s_%(field_name)s',
        }

    def RANDOM(self):
        # SQL expression yielding a random value (used for orderby='<random>').
        return 'Random()'

    def NOT_NULL(self,default,field_type):
        # Informix requires DEFAULT to precede NOT NULL in column DDL.
        return 'DEFAULT %s NOT NULL' % self.represent(default,field_type)

    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        """Build a SELECT, expressing limitby via Informix SKIP/FIRST.

        SKIP needs Informix 10+, FIRST needs 9+; the server version is
        read from the live connection's dbms_version attribute.
        """
        if limitby:
            (lmin, lmax) = limitby
            fetch_amt = lmax - lmin
            dbms_version = int(self.connection.dbms_version.split('.')[0])
            if lmin and (dbms_version >= 10):
                # Requires Informix 10.0+
                sql_s += ' SKIP %d' % (lmin, )
            if fetch_amt and (dbms_version >= 9):
                # Requires Informix 9.0+
                sql_s += ' FIRST %d' % (fetch_amt, )
        return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)

    def represent_exceptions(self, obj, fieldtype):
        """Literal-SQL representation for date/datetime values; returns
        None for all other field types (caller falls back to the default)."""
        if fieldtype == 'date':
            if isinstance(obj, (datetime.date, datetime.datetime)):
                obj = obj.isoformat()[:10]
            else:
                obj = str(obj)
            return "to_date('%s','%%Y-%%m-%%d')" % obj
        elif fieldtype == 'datetime':
            if isinstance(obj, datetime.datetime):
                obj = obj.isoformat()[:19].replace('T',' ')
            elif isinstance(obj, datetime.date):
                obj = obj.isoformat()[:10]+' 00:00:00'
            else:
                obj = str(obj)
            return "to_date('%s','%%Y-%%m-%%d %%H:%%M:%%S')" % obj
        return None

    # Parses user[:password]@host[:port]/database from the URI tail.
    REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>.+)$')

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        self.db = db
        self.dbengine = "informix"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args,uri)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        ruri = uri.split('://',1)[1]
        m = self.REGEX_URI.match(ruri)
        if not m:
            raise SyntaxError(
                "Invalid URI string in DAL: %s" % self.uri)
        user = credential_decoder(m.group('user'))
        if not user:
            raise SyntaxError('User required')
        password = credential_decoder(m.group('password'))
        if not password:
            password = ''
        host = m.group('host')
        if not host:
            raise SyntaxError('Host name required')
        db = m.group('db')
        if not db:
            raise SyntaxError('Database name required')
        # NOTE(review): user/password were already passed through
        # credential_decoder above; decoding twice is only harmless if
        # the decoder is idempotent (true for the default IDENTITY).
        user = credential_decoder(user)
        password = credential_decoder(password)
        # informixdb DSN is "database@host".
        dsn = '%s@%s' % (db,host)
        driver_args.update(user=user,password=password,autocommit=True)
        def connector(dsn=dsn,driver_args=driver_args):
            return self.driver.connect(dsn,**driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def execute(self,command):
        # informixdb rejects a trailing semicolon; strip it before running.
        if command[-1:]==';':
            command = command[:-1]
        return self.log_execute(command)

    def lastrowid(self,table):
        # sqlerrd[1] holds the last SERIAL value generated on this cursor.
        return self.cursor.sqlerrd[1]
3819
class InformixSEAdapter(InformixAdapter):
    """Adapter for Informix Standard Engine (work in progress).

    SE cannot express SKIP/FIRST in SQL, so limitby is emulated on the
    client side: a plain SELECT is issued and rowslice() trims the rows.
    """

    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        # Ignore limitby entirely; rowslice() applies the range afterwards.
        return 'SELECT %s %s FROM %s%s%s;' % \
            (sql_s, sql_f, sql_t, sql_w, sql_o)

    def rowslice(self, rows, minimum=0, maximum=None):
        # Client-side emulation of limitby over already-fetched rows.
        return rows[minimum:] if maximum is None else rows[minimum:maximum]
3831
class DB2Adapter(BaseAdapter):
    """DAL adapter for IBM DB2 via pyodbc.

    URI form: db2://DSN  (everything after 'db2://' is passed verbatim
    to pyodbc as the ODBC connection string).
    """
    drivers = ('pyodbc',)

    # Mapping of DAL field types to DB2 column-DDL fragments.
    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR(%(length)s)',
        'text': 'CLOB',
        'json': 'CLOB',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'BLOB',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INT',
        'bigint': 'BIGINT',
        'float': 'REAL',
        'double': 'DOUBLE',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'TIME',
        'datetime': 'TIMESTAMP',
        'id': 'INT GENERATED ALWAYS AS IDENTITY PRIMARY KEY NOT NULL',
        'reference': 'INT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'CLOB',
        'list:string': 'CLOB',
        'list:reference': 'CLOB',
        'big-id': 'BIGINT GENERATED ALWAYS AS IDENTITY PRIMARY KEY NOT NULL',
        'big-reference': 'BIGINT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s',
        }

    def LEFT_JOIN(self):
        return 'LEFT OUTER JOIN'

    def RANDOM(self):
        return 'RAND()'

    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        # NOTE(review): only the upper bound is expressed in SQL; lmin is
        # discarded here, so offsets appear not to be pushed to DB2 —
        # verify whether rowslice() is expected to handle the offset.
        if limitby:
            (lmin, lmax) = limitby
            sql_o += ' FETCH FIRST %i ROWS ONLY' % lmax
        return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)

    def represent_exceptions(self, obj, fieldtype):
        """Literal-SQL representation for blob/datetime values; returns
        None for all other field types (caller falls back to the default)."""
        if fieldtype == 'blob':
            obj = base64.b64encode(str(obj))
            return "BLOB('%s')" % obj
        elif fieldtype == 'datetime':
            # DB2 timestamp literal format: YYYY-MM-DD-HH.MM.SS
            if isinstance(obj, datetime.datetime):
                obj = obj.isoformat()[:19].replace('T','-').replace(':','.')
            elif isinstance(obj, datetime.date):
                obj = obj.isoformat()[:10]+'-00.00.00'
            return "'%s'" % obj
        return None

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        self.db = db
        self.dbengine = "db2"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args,uri)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        # The URI tail is the raw ODBC connection string.
        ruri = uri.split('://', 1)[1]
        def connector(cnxn=ruri,driver_args=driver_args):
            return self.driver.connect(cnxn,**driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def execute(self,command):
        # pyodbc/DB2 rejects a trailing semicolon; strip it before running.
        if command[-1:]==';':
            command = command[:-1]
        return self.log_execute(command)

    def lastrowid(self,table):
        # IDENTITY_VAL_LOCAL() returns the most recently assigned identity
        # value in this session.
        self.execute('SELECT DISTINCT IDENTITY_VAL_LOCAL() FROM %s;' % table)
        return long(self.cursor.fetchone()[0])

    def rowslice(self,rows,minimum=0,maximum=None):
        # Client-side trimming of fetched rows.
        if maximum is None:
            return rows[minimum:]
        return rows[minimum:maximum]
3917
class TeradataAdapter(BaseAdapter):
    """DAL adapter for Teradata via pyodbc.

    URI form: teradata://DSN (the URI tail is the raw ODBC string).
    """
    drivers = ('pyodbc',)

    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR(%(length)s)',
        'text': 'CLOB',
        'json': 'CLOB',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'BLOB',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INT',
        'bigint': 'BIGINT',
        'float': 'REAL',
        'double': 'DOUBLE',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'TIME',
        'datetime': 'TIMESTAMP',
        # Modified Constraint syntax for Teradata.
        # Teradata does not support ON DELETE.
        'id': 'INT GENERATED ALWAYS AS IDENTITY',  # Teradata Specific
        'reference': 'INT',
        'list:integer': 'CLOB',
        'list:string': 'CLOB',
        'list:reference': 'CLOB',
        'big-id': 'BIGINT GENERATED ALWAYS AS IDENTITY',  # Teradata Specific
        'big-reference': 'BIGINT',
        'reference FK': ' REFERENCES %(foreign_key)s',
        'reference TFK': ' FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s)',
        }

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        self.db = db
        self.dbengine = "teradata"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args,uri)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        # The URI tail is passed verbatim to pyodbc.
        ruri = uri.split('://', 1)[1]
        def connector(cnxn=ruri,driver_args=driver_args):
            return self.driver.connect(cnxn,**driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def LEFT_JOIN(self):
        return 'LEFT OUTER JOIN'

    # Similar to MSSQL, Teradata can't specify a range (for Pageby);
    # only an upper bound via TOP is emitted, lmin is discarded.
    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        if limitby:
            (lmin, lmax) = limitby
            sql_s += ' TOP %i' % lmax
        return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)

    def _truncate(self, table, mode=''):
        # Teradata truncation idiom; *mode* is accepted for interface
        # compatibility but unused.
        tablename = table._tablename
        return ['DELETE FROM %s ALL;' % (tablename)]
# Placeholder sequence name substituted into DDL; deliberately invalid so it
# can never collide with a real object name and is always replaced.
INGRES_SEQNAME='ii***lineitemsequence' # NOTE invalid database object name
                                     # (ANSI-SQL wants this form of name
                                     # to be a delimited identifier)

class IngresAdapter(BaseAdapter):
    """DAL adapter for Ingres via pyodbc.

    URI forms: ingres://dbname (local, OS auth) or a full ODBC
    connection string containing '='.
    """
    drivers = ('pyodbc',)

    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR(%(length)s)',
        'text': 'CLOB',
        'json': 'CLOB',
        'password': 'VARCHAR(%(length)s)',  ## Not sure what this contains utf8 or nvarchar. Or even bytes?
        'blob': 'BLOB',
        'upload': 'VARCHAR(%(length)s)',  ## FIXME utf8 or nvarchar... or blob? what is this type?
        'integer': 'INTEGER4', # or int8...
        'bigint': 'BIGINT',
        'float': 'FLOAT',
        'double': 'FLOAT8',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'ANSIDATE',
        'time': 'TIME WITHOUT TIME ZONE',
        'datetime': 'TIMESTAMP WITHOUT TIME ZONE',
        'id': 'int not null unique with default next value for %s' % INGRES_SEQNAME,
        'reference': 'INT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'CLOB',
        'list:string': 'CLOB',
        'list:reference': 'CLOB',
        'big-id': 'bigint not null unique with default next value for %s' % INGRES_SEQNAME,
        'big-reference': 'BIGINT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s', ## FIXME TODO
        }

    def LEFT_JOIN(self):
        return 'LEFT OUTER JOIN'

    def RANDOM(self):
        return 'RANDOM()'

    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        # FIRST n limits the row count; OFFSET (Ingres 9.2+) skips lmin rows.
        if limitby:
            (lmin, lmax) = limitby
            fetch_amt = lmax - lmin
            if fetch_amt:
                sql_s += ' FIRST %d ' % (fetch_amt, )
            if lmin:
                # Requires Ingres 9.2+
                sql_o += ' OFFSET %d' % (lmin, )
        return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        self.db = db
        self.dbengine = "ingres"
        # NOTE(review): references module-level `pyodbc`; find_driver below
        # sets self.driver, so this assignment looks vestigial — confirm
        # `pyodbc` is importable at module scope before relying on it.
        self._driver = pyodbc
        self.uri = uri
        if do_connect: self.find_driver(adapter_args,uri)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        connstr = uri.split(':', 1)[1]
        # Simple URI processing
        connstr = connstr.lstrip()
        while connstr.startswith('/'):
            connstr = connstr[1:]
        if '=' in connstr:
            # Assume we have a regular ODBC connection string and just use it
            ruri = connstr
        else:
            # Assume only (local) dbname is passed in with OS auth
            database_name = connstr
            default_driver_name = 'Ingres'
            vnode = '(local)'
            servertype = 'ingres'
            ruri = 'Driver={%s};Server=%s;Database=%s' % (default_driver_name, vnode, database_name)
        def connector(cnxn=ruri,driver_args=driver_args):
            return self.driver.connect(cnxn,**driver_args)

        self.connector = connector

        # TODO if version is >= 10, set types['id'] to Identity column, see http://community.actian.com/wiki/Using_Ingres_Identity_Columns
        if do_connect: self.reconnect()

    def create_sequence_and_triggers(self, query, table, **args):
        # post create table auto inc code (if needed)
        # modify table to btree for performance....
        # Older Ingres releases could use rule/trigger like Oracle above.
        if hasattr(table,'_primarykey'):
            modify_tbl_sql = 'modify %s to btree unique on %s' % \
                (table._tablename,
                 ', '.join(["'%s'" % x for x in table.primarykey]))
            self.execute(modify_tbl_sql)
        else:
            # Replace the placeholder sequence name with a per-table one,
            # create the sequence, then run the (patched) CREATE TABLE.
            tmp_seqname='%s_iisq' % table._tablename
            query=query.replace(INGRES_SEQNAME, tmp_seqname)
            self.execute('create sequence %s' % tmp_seqname)
            self.execute(query)
            self.execute('modify %s to btree unique on %s' % (table._tablename, 'id'))

    def lastrowid(self,table):
        # Read the current value of the per-table sequence created in
        # create_sequence_and_triggers().
        tmp_seqname='%s_iisq' % table
        self.execute('select current value for %s' % tmp_seqname)
        return long(self.cursor.fetchone()[0]) # don't really need int type cast here...
class IngresUnicodeAdapter(IngresAdapter):
    """Ingres adapter variant using Unicode column types (NVARCHAR/NCLOB)
    for string-like fields; everything else inherits from IngresAdapter."""

    drivers = ('pyodbc',)

    types = {
        'boolean': 'CHAR(1)',
        'string': 'NVARCHAR(%(length)s)',
        'text': 'NCLOB',
        'json': 'NCLOB',
        'password': 'NVARCHAR(%(length)s)',  ## Not sure what this contains utf8 or nvarchar. Or even bytes?
        'blob': 'BLOB',
        'upload': 'VARCHAR(%(length)s)',  ## FIXME utf8 or nvarchar... or blob? what is this type?
        'integer': 'INTEGER4', # or int8...
        'bigint': 'BIGINT',
        'float': 'FLOAT',
        'double': 'FLOAT8',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'ANSIDATE',
        'time': 'TIME WITHOUT TIME ZONE',
        'datetime': 'TIMESTAMP WITHOUT TIME ZONE',
        'id': 'INTEGER4 not null unique with default next value for %s'% INGRES_SEQNAME,
        'reference': 'INTEGER4, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'NCLOB',
        'list:string': 'NCLOB',
        'list:reference': 'NCLOB',
        'big-id': 'BIGINT not null unique with default next value for %s'% INGRES_SEQNAME,
        'big-reference': 'BIGINT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s', ## FIXME TODO
        }
class SAPDBAdapter(BaseAdapter):
    """DAL adapter for SAP DB / MaxDB via the `sapdb` driver (experimental).

    URI form: sapdb://user:password@host/database
    """
    drivers = ('sapdb',)

    support_distributed_transaction = False
    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR(%(length)s)',
        'text': 'LONG',
        'json': 'LONG',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'LONG',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INT',
        'bigint': 'BIGINT',
        'float': 'FLOAT',
        'double': 'DOUBLE PRECISION',
        'decimal': 'FIXED(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'TIME',
        'datetime': 'TIMESTAMP',
        'id': 'INT PRIMARY KEY',
        'reference': 'INT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'LONG',
        'list:string': 'LONG',
        'list:reference': 'LONG',
        'big-id': 'BIGINT PRIMARY KEY',
        'big-reference': 'BIGINT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        }

    def sequence_name(self,table):
        # Name of the id-generating sequence for *table*.
        return '%s_id_Seq' % table

    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        # limitby is emulated with a nested subselect over ROWNO:
        # the inner query caps at lmax, the outer filters w_row > lmin.
        if limitby:
            (lmin, lmax) = limitby
            if len(sql_w) > 1:
                sql_w_row = sql_w + ' AND w_row > %i' % lmin
            else:
                sql_w_row = 'WHERE w_row > %i' % lmin
            return '%s %s FROM (SELECT w_tmp.*, ROWNO w_row FROM (SELECT %s FROM %s%s%s) w_tmp WHERE ROWNO=%i) %s %s %s;' % (sql_s, sql_f, sql_f, sql_t, sql_w, sql_o, lmax, sql_t, sql_w_row, sql_o)
        return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)

    def create_sequence_and_triggers(self, query, table, **args):
        # following lines should only be executed if table._sequence_name does not exist
        self.execute('CREATE SEQUENCE %s;' % table._sequence_name)
        self.execute("ALTER TABLE %s ALTER COLUMN %s SET DEFAULT NEXTVAL('%s');" \
            % (table._tablename, table._id.name, table._sequence_name))
        self.execute(query)

    # Parses user[:password]@host[:port]/db[?sslmode=...] from the URI tail.
    REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:@]+)(\:(?P<port>[0-9]+))?/(?P<db>[^\?]+)(\?sslmode=(?P<sslmode>.+))?$')

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        self.db = db
        self.dbengine = "sapdb"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args,uri)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        ruri = uri.split('://',1)[1]
        m = self.REGEX_URI.match(ruri)
        if not m:
            raise SyntaxError("Invalid URI string in DAL")
        user = credential_decoder(m.group('user'))
        if not user:
            raise SyntaxError('User required')
        password = credential_decoder(m.group('password'))
        if not password:
            password = ''
        host = m.group('host')
        if not host:
            raise SyntaxError('Host name required')
        db = m.group('db')
        if not db:
            raise SyntaxError('Database name required')
        def connector(user=user, password=password, database=db,
                      host=host, driver_args=driver_args):
            return self.driver.Connection(user, password, database,
                                          host, **driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def lastrowid(self,table):
        # Read the sequence's current value (assigned by the column default).
        self.execute("select %s.NEXTVAL from dual" % table._sequence_name)
        return long(self.cursor.fetchone()[0])
4214
class CubridAdapter(MySQLAdapter):
    """DAL adapter for the CUBRID database via the `cubriddb` driver.

    SQL generation is inherited from MySQLAdapter; only connection
    setup and the post-connect session settings differ.

    URI form: cubrid://user:password@host[:port]/db[?set_encoding=...]
    """
    drivers = ('cubriddb',)

    # Parses user[:password]@host[:port]/db[?set_encoding=charset].
    REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>[^?]+)(\?set_encoding=(?P<charset>\w+))?$')

    def __init__(self, db, uri, pool_size=0, folder=None, db_codec='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        self.db = db
        self.dbengine = "cubrid"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args,uri)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        ruri = uri.split('://',1)[1]
        m = self.REGEX_URI.match(ruri)
        if not m:
            raise SyntaxError(
                "Invalid URI string in DAL: %s" % self.uri)
        # BUGFIX: previously `user` was run through credential_decoder
        # twice and the decoded password (`passwd`) was computed but the
        # raw `password` was passed to the connector.  Decode each
        # credential exactly once and use the decoded values.
        user = credential_decoder(m.group('user'))
        if not user:
            raise SyntaxError('User required')
        password = credential_decoder(m.group('password'))
        if not password:
            password = ''
        host = m.group('host')
        if not host:
            raise SyntaxError('Host name required')
        db = m.group('db')
        if not db:
            raise SyntaxError('Database name required')
        port = int(m.group('port') or '30000')
        # NOTE(review): charset is parsed but not forwarded to the driver;
        # kept for parity with the original behavior.
        charset = m.group('charset') or 'utf8'
        def connector(host=host, port=port, db=db,
                      user=user, passwd=password, driver_args=driver_args):
            return self.driver.connect(host,port,db,user,passwd,**driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def after_connection(self):
        # Session settings matching the MySQL behavior web2py expects.
        self.execute('SET FOREIGN_KEY_CHECKS=1;')
        self.execute("SET sql_mode='NO_BACKSLASH_ESCAPES';")
4262
class DatabaseStoredFile:
    """File-like object that stores web2py's migration metadata (.table
    files) in a `web2py_filesystem` database table instead of on disk.
    Used on platforms without a writable filesystem (e.g. GAE + Cloud SQL).
    """

    # Class-level flag: True once the web2py_filesystem table has been
    # created (or confirmed) for this process.
    web2py_filesystem = False

    def escape(self,obj):
        # Delegate escaping to the adapter of the owning DAL instance.
        return self.db._adapter.escape(obj)

    def __init__(self,db,filename,mode):
        # NOTE(review): `filename` is interpolated directly into SQL below
        # (here and in close_connection/exists); callers pass internal
        # metadata paths, but this is not safe for untrusted names.
        if not db._adapter.dbengine in ('mysql', 'postgres', 'sqlite'):
            raise RuntimeError("only MySQL/Postgres/SQLite can store metadata .table files in database for now")
        self.db = db
        self.filename = filename
        self.mode = mode
        # Lazily create the backing table once per process.
        if not self.web2py_filesystem:
            if db._adapter.dbengine == 'mysql':
                sql = "CREATE TABLE IF NOT EXISTS web2py_filesystem (path VARCHAR(255), content LONGTEXT, PRIMARY KEY(path) ) ENGINE=InnoDB;"
            elif db._adapter.dbengine in ('postgres', 'sqlite'):
                sql = "CREATE TABLE IF NOT EXISTS web2py_filesystem (path VARCHAR(255), content TEXT, PRIMARY KEY(path));"
            self.db.executesql(sql)
            DatabaseStoredFile.web2py_filesystem = True
        self.p=0           # current read position within self.data
        self.data = ''     # full file content, loaded eagerly on open
        if mode in ('r','rw','a'):
            query = "SELECT content FROM web2py_filesystem WHERE path='%s'" \
                % filename
            rows = self.db.executesql(query)
            if rows:
                self.data = rows[0][0]
            elif exists(filename):
                # Fall back to a real on-disk file if present.
                datafile = open(filename, 'r')
                try:
                    self.data = datafile.read()
                finally:
                    datafile.close()
            elif mode in ('r','rw'):
                raise RuntimeError("File %s does not exist" % filename)

    def read(self, bytes):
        # Return up to *bytes* characters from the current position.
        data = self.data[self.p:self.p+bytes]
        self.p += len(data)
        return data

    def readline(self):
        # Return the next line including its newline, or the remainder
        # of the buffer when no newline is left.
        i = self.data.find('\n',self.p)+1
        if i>0:
            data, self.p = self.data[self.p:i], i
        else:
            data, self.p = self.data[self.p:], len(self.data)
        return data

    def write(self,data):
        # Writes are buffered in memory; flushed by close_connection().
        self.data += data

    def close_connection(self):
        # Persist the buffered content: delete-then-insert keyed by path,
        # then commit and drop the DAL reference (idempotent afterwards).
        if self.db is not None:
            self.db.executesql(
                "DELETE FROM web2py_filesystem WHERE path='%s'" % self.filename)
            query = "INSERT INTO web2py_filesystem(path,content) VALUES ('%s','%s')"\
                % (self.filename, self.data.replace("'","''"))
            self.db.executesql(query)
            self.db.commit()
            self.db = None

    def close(self):
        self.close_connection()

    @staticmethod
    def exists(db, filename):
        """True if *filename* exists on disk or in web2py_filesystem.

        Operational/programming errors from a missing table are logged
        and treated as "does not exist"; other exceptions propagate.
        """
        if exists(filename):
            return True
        query = "SELECT path FROM web2py_filesystem WHERE path='%s'" % filename
        try:
            if db.executesql(query):
                return True
        except Exception, e:
            if not (db._adapter.isOperationalError(e) or
                    db._adapter.isProgrammingError(e)):
                raise
            # no web2py_filesystem found?
            tb = traceback.format_exc()
            LOGGER.error("Could not retrieve %s\n%s" % (filename, tb))
        return False
4348
class UseDatabaseStoredFile:
    """Mixin that redirects an adapter's metadata-file I/O to
    DatabaseStoredFile, so .table files live in the database."""

    def file_exists(self, filename):
        """True if *filename* exists on disk or in web2py_filesystem."""
        return DatabaseStoredFile.exists(self.db, filename)

    def file_open(self, filename, mode='rb', lock=True):
        """Open *filename* as a DatabaseStoredFile (*lock* is ignored)."""
        return DatabaseStoredFile(self.db, filename, mode)

    def file_close(self, fileobj):
        """Flush the buffered content back into the database."""
        fileobj.close_connection()

    def file_delete(self, filename):
        """Remove *filename*'s row from web2py_filesystem and commit."""
        sql = "DELETE FROM web2py_filesystem WHERE path='%s'" % filename
        self.db.executesql(sql)
        self.db.commit()
4365
class GoogleSQLAdapter(UseDatabaseStoredFile,MySQLAdapter):
    """DAL adapter for Google Cloud SQL (MySQL dialect) on App Engine.

    Connects through GAE's `rdbms` API and stores migration metadata in
    the database via the UseDatabaseStoredFile mixin.
    """
    uploads_in_blob = True

    # URI tail is "<instance>/<db>"; everything up to the last '/' is
    # the instance name.
    REGEX_URI = re.compile('^(?P<instance>.*)/(?P<db>.*)$')

    def __init__(self, db, uri='google:sql://realm:domain/database',
                 pool_size=0, folder=None, db_codec='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):

        self.db = db
        self.dbengine = "mysql"
        self.uri = uri
        self.pool_size = pool_size
        self.db_codec = db_codec
        self._after_connection = after_connection
        if do_connect: self.find_driver(adapter_args, uri)
        # Derive a per-application folder from the thread-local request
        # folder (the part after .../applications/).
        self.folder = folder or pjoin('$HOME',THREAD_LOCAL.folder.split(
                os.sep+'applications'+os.sep,1)[1])
        ruri = uri.split("://")[1]
        m = self.REGEX_URI.match(ruri)
        if not m:
            raise SyntaxError("Invalid URI string in SQLDB: %s" % self.uri)
        instance = credential_decoder(m.group('instance'))
        self.dbstring = db = credential_decoder(m.group('db'))
        driver_args['instance'] = instance
        if not 'charset' in driver_args:
            driver_args['charset'] = 'utf8'
        # When createdb is True the database is created/selected in
        # after_connection() instead of being part of the connect call.
        self.createdb = createdb = adapter_args.get('createdb',True)
        if not createdb:
            driver_args['database'] = db
        def connector(driver_args=driver_args):
            return rdbms.connect(**driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def after_connection(self):
        if self.createdb:
            # self.execute('DROP DATABASE %s' % self.dbstring)
            self.execute('CREATE DATABASE IF NOT EXISTS %s' % self.dbstring)
            self.execute('USE %s' % self.dbstring)
        self.execute("SET FOREIGN_KEY_CHECKS=1;")
        self.execute("SET sql_mode='NO_BACKSLASH_ESCAPES';")

    def execute(self, command, *a, **b):
        # rdbms expects unicode SQL; decode byte strings before logging.
        return self.log_execute(command.decode('utf8'), *a, **b)

    def find_driver(self,adapter_args,uri=None):
        # No real DB-API driver lookup on GAE; mark the driver as "google".
        self.adapter_args = adapter_args
        self.driver = "google"
4416
class NoSQLAdapter(BaseAdapter):
    """Common base for the non-relational adapters (Google Datastore,
    CouchDB, MongoDB, IMAP).

    Keeps the BaseAdapter interface but disables SQL generation:
    relational-only operations raise SyntaxError, transactions are no-ops.
    """
    # None of these backends support SELECT ... FOR UPDATE semantics.
    can_select_for_update = False

    @staticmethod
    def to_unicode(obj):
        # Coerce any value to a unicode string (Python 2 semantics:
        # byte strings are assumed UTF-8 encoded).
        if isinstance(obj, str):
            return obj.decode('utf8')
        elif not isinstance(obj, unicode):
            return unicode(obj)
        return obj

    def id_query(self, table):
        # Query matching every record of *table* (all ids are positive).
        return table._id > 0

    def represent(self, obj, fieldtype):
        """Convert a Python value into the backend representation for
        *fieldtype* (a DAL type string, an SQLCustomType, or — on the
        datastore — a gae.Property).  Returns None for an empty value
        on non-string field types."""
        field_is_type = fieldtype.startswith
        if isinstance(obj, CALLABLETYPES):
            obj = obj()
        if isinstance(fieldtype, SQLCustomType):
            return fieldtype.encoder(obj)
        if isinstance(obj, (Expression, Field)):
            raise SyntaxError("non supported on GAE")
        if self.dbengine == 'google:datastore':
            # Native datastore properties pass through unchanged.
            if isinstance(fieldtype, gae.Property):
                return obj
        is_string = isinstance(fieldtype,str)
        is_list = is_string and field_is_type('list:')
        if is_list:
            # Normalize list: fields to a real list.
            if not obj:
                obj = []
            if not isinstance(obj, (list, tuple)):
                obj = [obj]
        if obj == '' and not \
                (is_string and fieldtype[:2] in ['st','te', 'pa','up']):
            # Empty string means "no value" except for string-like types
            # (string, text, password, upload).
            return None
        if not obj is None:
            if isinstance(obj, list) and not is_list:
                # A list on a scalar field: coerce each element.
                obj = [self.represent(o, fieldtype) for o in obj]
            elif fieldtype in ('integer','bigint','id'):
                obj = long(obj)
            elif fieldtype == 'double':
                obj = float(obj)
            elif is_string and field_is_type('reference'):
                # References are stored as the referenced record's id.
                if isinstance(obj, (Row, Reference)):
                    obj = obj['id']
                obj = long(obj)
            elif fieldtype == 'boolean':
                # Truthy unless the string form starts with '0' or 'F'.
                if obj and not str(obj)[0].upper() in '0F':
                    obj = True
                else:
                    obj = False
            elif fieldtype == 'date':
                if not isinstance(obj, datetime.date):
                    # Parse 'YYYY-MM-DD'.
                    (y, m, d) = map(int,str(obj).strip().split('-'))
                    obj = datetime.date(y, m, d)
                elif isinstance(obj,datetime.datetime):
                    # Truncate datetimes to their date component.
                    (y, m, d) = (obj.year, obj.month, obj.day)
                    obj = datetime.date(y, m, d)
            elif fieldtype == 'time':
                if not isinstance(obj, datetime.time):
                    # Parse 'HH:MM[:SS]', defaulting missing parts to 0.
                    time_items = map(int,str(obj).strip().split(':')[:3])
                    if len(time_items) == 3:
                        (h, mi, s) = time_items
                    else:
                        (h, mi, s) = time_items + [0]
                    obj = datetime.time(h, mi, s)
            elif fieldtype == 'datetime':
                if not isinstance(obj, datetime.datetime):
                    # Parse 'YYYY-MM-DD[ HH:MM:SS]'.
                    (y, m, d) = map(int,str(obj)[:10].strip().split('-'))
                    time_items = map(int,str(obj)[11:].strip().split(':')[:3])
                    while len(time_items)<3:
                        time_items.append(0)
                    (h, mi, s) = time_items
                    obj = datetime.datetime(y, m, d, h, mi, s)
            elif fieldtype == 'blob':
                pass
            elif fieldtype == 'json':
                if isinstance(obj, basestring):
                    obj = self.to_unicode(obj)
                    if have_serializers:
                        obj = serializers.loads_json(obj)
                    elif simplejson:
                        obj = simplejson.loads(obj)
                    else:
                        raise RuntimeError("missing simplejson")
            elif is_string and field_is_type('list:string'):
                return map(self.to_unicode,obj)
            elif is_list:
                return map(int,obj)
            else:
                obj = self.to_unicode(obj)
        return obj

    def _insert(self,table,fields):
        # Human-readable pseudo-SQL, used only for db._lastsql / logging.
        return 'insert %s in %s' % (fields, table)

    def _count(self,query,distinct=None):
        # Human-readable pseudo-SQL, used only for db._lastsql / logging.
        return 'count %s' % repr(query)

    def _select(self,query,fields,attributes):
        # Human-readable pseudo-SQL, used only for db._lastsql / logging.
        return 'select %s where %s' % (repr(fields), repr(query))

    def _delete(self,tablename, query):
        # Human-readable pseudo-SQL, used only for db._lastsql / logging.
        return 'delete %s where %s' % (repr(tablename),repr(query))

    def _update(self,tablename,query,fields):
        # Human-readable pseudo-SQL, used only for db._lastsql / logging.
        return 'update %s (%s) where %s' % (repr(tablename),
                                            repr(fields),repr(query))

    def commit(self):
        """
        remember: no transactions on many NoSQL
        """
        pass

    def rollback(self):
        """
        remember: no transactions on many NoSQL
        """
        pass

    def close_connection(self):
        """
        remember: no transactions on many NoSQL
        """
        pass

    # these functions should never be called!
    def OR(self,first,second): raise SyntaxError("Not supported")
    def AND(self,first,second): raise SyntaxError("Not supported")
    def AS(self,first,second): raise SyntaxError("Not supported")
    def ON(self,first,second): raise SyntaxError("Not supported")
    def STARTSWITH(self,first,second=None): raise SyntaxError("Not supported")
    def ENDSWITH(self,first,second=None): raise SyntaxError("Not supported")
    def ADD(self,first,second): raise SyntaxError("Not supported")
    def SUB(self,first,second): raise SyntaxError("Not supported")
    def MUL(self,first,second): raise SyntaxError("Not supported")
    def DIV(self,first,second): raise SyntaxError("Not supported")
    def LOWER(self,first): raise SyntaxError("Not supported")
    def UPPER(self,first): raise SyntaxError("Not supported")
    def EXTRACT(self,first,what): raise SyntaxError("Not supported")
    def LENGTH(self, first): raise SyntaxError("Not supported")
    def AGGREGATE(self,first,what): raise SyntaxError("Not supported")
    def LEFT_JOIN(self): raise SyntaxError("Not supported")
    def RANDOM(self): raise SyntaxError("Not supported")
    def SUBSTRING(self,field,parameters): raise SyntaxError("Not supported")
    def PRIMARY_KEY(self,key): raise SyntaxError("Not supported")
    def ILIKE(self,first,second): raise SyntaxError("Not supported")
    def drop(self,table,mode): raise SyntaxError("Not supported")
    def alias(self,table,alias): raise SyntaxError("Not supported")
    def migrate_table(self,*a,**b): raise SyntaxError("Not supported")
    def distributed_transaction_begin(self,key): raise SyntaxError("Not supported")
    def prepare(self,key): raise SyntaxError("Not supported")
    def commit_prepared(self,key): raise SyntaxError("Not supported")
    def rollback_prepared(self,key): raise SyntaxError("Not supported")
    def concat_add(self,table): raise SyntaxError("Not supported")
    def constraint_name(self, table, fieldname): raise SyntaxError("Not supported")
    def create_sequence_and_triggers(self, query, table, **args): pass
    def log_execute(self,*a,**b): raise SyntaxError("Not supported")
    def execute(self,*a,**b): raise SyntaxError("Not supported")
    def represent_exceptions(self, obj, fieldtype): raise SyntaxError("Not supported")
    def lastrowid(self,table): raise SyntaxError("Not supported")
    def rowslice(self,rows,minimum=0,maximum=None): raise SyntaxError("Not supported")
4581
class GAEF(object):
    """A single Google App Engine datastore filter.

    Holds the pieces of one comparison: the property name, the datastore
    comparison operator string, the comparison value, and a Python
    callable (``apply``) used to evaluate the same comparison in memory
    when the result set has already been materialized as a list.
    """
    def __init__(self, name, op, value, apply):
        # the datastore addresses the primary key as '__key__'
        if name == 'id':
            name = '__key__'
        self.name = name
        self.op = op
        self.value = value
        self.apply = apply

    def __repr__(self):
        return '(%s %s %s:%s)' % (
            self.name, self.op, repr(self.value), type(self.value))
class GoogleDatastoreAdapter(NoSQLAdapter):
    """DAL adapter for the Google App Engine datastore (google:datastore).

    Queries are translated into GAE ``db.Query`` filters (as ``GAEF``
    instances) rather than SQL strings.  Only a subset of the DAL API is
    available: no joins, no groupby, and only AND-combined filters.
    """
    uploads_in_blob = True
    # NOTE(review): class-level dict, populated via update() in __init__,
    # so it is shared by all instances — confirm this is intentional.
    types = {}

    # filesystem-based migration bookkeeping does not apply on GAE
    def file_exists(self, filename): pass
    def file_open(self, filename, mode='rb', lock=True): pass
    def file_close(self, fileobj): pass

    # an optional trailing '://<namespace>' in the URI selects a
    # datastore namespace
    REGEX_NAMESPACE = re.compile('.*://(?P<namespace>.+)')

    def __init__(self, db, uri, pool_size=0, folder=None, db_codec='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        """Bind the adapter to *db* and map DAL types to GAE properties.

        Pooling is meaningless on GAE, so pool_size is forced to 0.
        """
        self.types.update({
            'boolean': gae.BooleanProperty,
            'string': (lambda **kwargs: gae.StringProperty(multiline=True, **kwargs)),
            'text': gae.TextProperty,
            'json': gae.TextProperty,
            'password': gae.StringProperty,
            'blob': gae.BlobProperty,
            'upload': gae.StringProperty,
            'integer': gae.IntegerProperty,
            'bigint': gae.IntegerProperty,
            'float': gae.FloatProperty,
            'double': gae.FloatProperty,
            'decimal': GAEDecimalProperty,
            'date': gae.DateProperty,
            'time': gae.TimeProperty,
            'datetime': gae.DateTimeProperty,
            'id': None,  # the datastore key itself; no property needed
            'reference': gae.IntegerProperty,
            'list:string': (lambda **kwargs: gae.StringListProperty(default=None, **kwargs)),
            'list:integer': (lambda **kwargs: gae.ListProperty(int, default=None, **kwargs)),
            'list:reference': (lambda **kwargs: gae.ListProperty(int, default=None, **kwargs)),
            })
        self.db = db
        self.uri = uri
        self.dbengine = 'google:datastore'
        self.folder = folder
        db['_lastsql'] = ''
        self.db_codec = 'UTF-8'
        self._after_connection = after_connection
        self.pool_size = 0
        match = self.REGEX_NAMESPACE.match(uri)
        if match:
            namespace_manager.set_namespace(match.group('namespace'))

    def parse_id(self, value, field_type):
        # datastore ids are already usable as-is
        return value

    def create_table(self, table, migrate=True, fake_migrate=False,
                     polymodel=None):
        """Build the GAE Model class backing *table*.

        polymodel may be None (plain gae.Model), True (PolyModel), or a
        parent Table whose _tableobj is subclassed.  The resulting class
        is stored on table._tableobj; returns None.
        """
        myfields = {}
        for field in table:
            # fields inherited from a polymodel parent are not redeclared
            if isinstance(polymodel, Table) and field.name in polymodel.fields():
                continue
            attr = {}
            if isinstance(field.custom_qualifier, dict):
                # custom properties to add to the GAE field declaration
                attr = field.custom_qualifier
            field_type = field.type
            if isinstance(field_type, SQLCustomType):
                ftype = self.types[field_type.native or field_type.type](**attr)
            elif isinstance(field_type, gae.Property):
                # a raw GAE property passed through untouched
                ftype = field_type
            elif field_type.startswith('id'):
                continue
            elif field_type.startswith('decimal'):
                precision, scale = field_type[7:].strip('()').split(',')
                precision = int(precision)
                scale = int(scale)
                ftype = GAEDecimalProperty(precision, scale, **attr)
            elif field_type.startswith('reference'):
                if field.notnull:
                    attr = dict(required=True)
                referenced = field_type[10:].strip()
                ftype = self.types[field_type[:9]](referenced, **attr)
            elif field_type.startswith('list:reference'):
                if field.notnull:
                    attr['required'] = True
                referenced = field_type[15:].strip()
                ftype = self.types[field_type[:14]](**attr)
            elif field_type.startswith('list:'):
                ftype = self.types[field_type](**attr)
            elif not field_type in self.types \
                    or not self.types[field_type]:
                raise SyntaxError('Field: unknown field type: %s' % field_type)
            else:
                ftype = self.types[field_type](**attr)
            myfields[field.name] = ftype
        if not polymodel:
            table._tableobj = classobj(table._tablename, (gae.Model, ), myfields)
        elif polymodel == True:
            table._tableobj = classobj(table._tablename, (PolyModel, ), myfields)
        elif isinstance(polymodel, Table):
            table._tableobj = classobj(table._tablename, (polymodel._tableobj, ), myfields)
        else:
            raise SyntaxError("polymodel must be None, True, a table or a tablename")
        return None

    def expand(self, expression, field_type=None):
        """Expand a DAL expression into GAE terms.

        Fields expand to their name; Query/Expression dispatch to the
        operator methods below (which return GAEF lists); bare values are
        represented per *field_type*.
        """
        if isinstance(expression, Field):
            if expression.type in ('text', 'blob', 'json'):
                raise SyntaxError('AppEngine does not index by: %s' % expression.type)
            return expression.name
        elif isinstance(expression, (Expression, Query)):
            if not expression.second is None:
                return expression.op(expression.first, expression.second)
            elif not expression.first is None:
                return expression.op(expression.first)
            else:
                return expression.op()
        elif field_type:
            return self.represent(expression, field_type)
        elif isinstance(expression, (list, tuple)):
            return ','.join([self.represent(item, field_type) for item in expression])
        else:
            return str(expression)

    ### TODO from gql.py Expression
    def AND(self, first, second):
        # concatenate the two GAEF filter lists; __key__ filters must
        # come first for the datastore
        a = self.expand(first)
        b = self.expand(second)
        if b[0].name == '__key__' and a[0].name != '__key__':
            return b + a
        return a + b

    def EQ(self, first, second=None):
        if isinstance(second, Key):
            return [GAEF(first.name, '=', second, lambda a, b: a == b)]
        return [GAEF(first.name, '=', self.represent(second, first.type), lambda a, b: a == b)]

    def NE(self, first, second=None):
        if first.type != 'id':
            return [GAEF(first.name, '!=', self.represent(second, first.type), lambda a, b: a != b)]
        else:
            if not second is None:
                second = Key.from_path(first._tablename, long(second))
            return [GAEF(first.name, '!=', second, lambda a, b: a != b)]

    def LT(self, first, second=None):
        if first.type != 'id':
            return [GAEF(first.name, '<', self.represent(second, first.type), lambda a, b: a < b)]
        else:
            second = Key.from_path(first._tablename, long(second))
            return [GAEF(first.name, '<', second, lambda a, b: a < b)]

    def LE(self, first, second=None):
        if first.type != 'id':
            return [GAEF(first.name, '<=', self.represent(second, first.type), lambda a, b: a <= b)]
        else:
            second = Key.from_path(first._tablename, long(second))
            return [GAEF(first.name, '<=', second, lambda a, b: a <= b)]

    def GT(self, first, second=None):
        # id > 0 is the DAL's "all records" idiom; keep it as a plain
        # comparison so select_raw can drop it
        if first.type != 'id' or second == 0 or second == '0':
            return [GAEF(first.name, '>', self.represent(second, first.type), lambda a, b: a > b)]
        else:
            second = Key.from_path(first._tablename, long(second))
            return [GAEF(first.name, '>', second, lambda a, b: a > b)]

    def GE(self, first, second=None):
        if first.type != 'id':
            return [GAEF(first.name, '>=', self.represent(second, first.type), lambda a, b: a >= b)]
        else:
            second = Key.from_path(first._tablename, long(second))
            return [GAEF(first.name, '>=', second, lambda a, b: a >= b)]

    def INVERT(self, first):
        # descending sort marker for orderby
        return '-%s' % first.name

    def COMMA(self, first, second):
        return '%s, %s' % (self.expand(first), self.expand(second))

    def BELONGS(self, first, second=None):
        if not isinstance(second, (list, tuple)):
            raise SyntaxError("Not supported")
        if first.type != 'id':
            return [GAEF(first.name, 'in', self.represent(second, first.type), lambda a, b: a in b)]
        else:
            second = [Key.from_path(first._tablename, int(i)) for i in second]
            return [GAEF(first.name, 'in', second, lambda a, b: a in b)]

    def CONTAINS(self, first, second, case_sensitive=False):
        # silently ignoring: GAE can only do case sensitive matches!
        if not first.type.startswith('list:'):
            raise SyntaxError("Not supported")
        return [GAEF(first.name, '=', self.expand(second, first.type[5:]), lambda a, b: b in a)]

    def NOT(self, first):
        """Negate a simple comparison by swapping it for its opposite."""
        nops = {self.EQ: self.NE,
                self.NE: self.EQ,
                self.LT: self.GE,
                self.GT: self.LE,
                self.LE: self.GT,
                self.GE: self.LT}
        if not isinstance(first, Query):
            raise SyntaxError("Not supported")
        nop = nops.get(first.op, None)
        if not nop:
            raise SyntaxError("Not supported %s" % first.op.__name__)
        first.op = nop
        return self.expand(first)

    def truncate(self, table, mode):
        # no TRUNCATE on the datastore: delete every record instead
        self.db(self.db._adapter.id_query(table)).delete()

    def select_raw(self, query, fields=None, attributes=None):
        """Run *query* and return (items, tablename, fieldnames).

        *items* is either a list (when a filter resolved to a concrete
        record) or a lazy gae.Query.  Supports the 'projection',
        'filterfields', 'reusecursor', 'orderby' and 'limitby'
        attributes; joins and groupby are rejected.
        """
        db = self.db
        fields = fields or []
        attributes = attributes or {}
        args_get = attributes.get
        new_fields = []
        for item in fields:
            if isinstance(item, SQLALL):
                new_fields += item._table
            else:
                new_fields.append(item)
        fields = new_fields
        if query:
            tablename = self.get_table(query)
        elif fields:
            tablename = fields[0].tablename
            query = db._adapter.id_query(fields[0].table)
        else:
            raise SyntaxError("Unable to determine a tablename")

        if query:
            if use_common_filters(query):
                query = self.common_filter(query, [tablename])

        # tableobj is a GAE Model class (or subclass)
        tableobj = db[tablename]._tableobj
        filters = self.expand(query)

        projection = None
        if len(db[tablename].fields) == len(fields):
            # getting all fields, not a projection query
            projection = None
        elif args_get('projection') == True:
            projection = []
            for f in fields:
                if f.type in ['text', 'blob', 'json']:
                    raise SyntaxError(
                        "text and blob field types not allowed in projection queries")
                else:
                    projection.append(f.name)
        elif args_get('filterfields') == True:
            # fetch everything but only parse the listed fields later
            projection = []
            for f in fields:
                projection.append(f.name)

        # real projections can't include 'id';
        # it will be added to the result later
        query_projection = [
            p for p in projection if
            p != db[tablename]._id.name] if projection and \
            args_get('projection') == True \
            else None

        cursor = None
        if isinstance(args_get('reusecursor'), str):
            cursor = args_get('reusecursor')
        items = gae.Query(tableobj, projection=query_projection,
                          cursor=cursor)

        for flt in filters:
            if args_get('projection') == True and \
               flt.name in query_projection and \
               flt.op in ['=', '<=', '>=']:
                raise SyntaxError(
                    "projection fields cannot have equality filters")
            if flt.name == '__key__' and flt.op == '>' and flt.value == 0:
                # 'id > 0' means "all records": nothing to filter
                continue
            elif flt.name == '__key__' and flt.op == '=':
                if flt.value == 0:
                    items = []
                elif isinstance(flt.value, Key):
                    # key queries return a class instance,
                    # can't use projection
                    # extra values will be ignored in post-processing later
                    item = tableobj.get(flt.value)
                    items = (item and [item]) or []
                else:
                    # key queries return a class instance,
                    # can't use projection
                    # extra values will be ignored in post-processing later
                    item = tableobj.get_by_id(flt.value)
                    items = (item and [item]) or []
            elif isinstance(items, list):  # i.e. there is a single record!
                # BUGFIX: filter each candidate record 'i' (the original
                # code tested a stale 'item' from a previous branch)
                items = [i for i in items if flt.apply(
                    getattr(i, flt.name), flt.value)]
            else:
                if flt.name == '__key__' and flt.op != 'in':
                    # inequality on keys requires ordering by key first
                    items.order('__key__')
                items = items.filter('%s %s' % (flt.name, flt.op),
                                     flt.value)
        if not isinstance(items, list):
            if args_get('left', None):
                raise SyntaxError('Set: no left join in appengine')
            if args_get('groupby', None):
                raise SyntaxError('Set: no groupby in appengine')
            orderby = args_get('orderby', False)
            if orderby:
                ### THIS REALLY NEEDS IMPROVEMENT !!!
                if isinstance(orderby, (list, tuple)):
                    orderby = xorify(orderby)
                if isinstance(orderby, Expression):
                    orderby = self.expand(orderby)
                orders = orderby.split(', ')
                for order in orders:
                    order = {'-id': '-__key__', 'id': '__key__'}.get(order, order)
                    items = items.order(order)
            if args_get('limitby', None):
                (lmin, lmax) = attributes['limitby']
                (limit, offset) = (lmax - lmin, lmin)
                rows = items.fetch(limit, offset=offset)
                # cursor is only useful if there was a limit and we
                # didn't return all results
                if args_get('reusecursor'):
                    db['_lastcursor'] = items.cursor()
                items = rows
        return (items, tablename, projection or db[tablename].fields)

    def select(self, query, fields, attributes):
        """GAE version of select.  Notes:

        - db['_lastsql'] is not set: there is no SQL statement string
          for a GAE query
        - 'nativeRef' is a magical fieldname used for self references
        - attribute 'projection'=True triggers GAE projection queries
          (each field must be indexed, no blob/text fields, and a field
          cannot be both filtered by equality and selected); see
          https://developers.google.com/appengine/docs/python/datastore/queries#Query_Projection
        - attribute 'filterfields'=True makes web2py parse only the
          listed fields into the Rows object even though all fields are
          returned, reducing memory when projections are not usable
        - attribute 'reusecursor' allows cursors with limitby: pass True
          for the first query, then db['_lastcursor'] to continue; the
          filters must be identical between requests.  See
          https://developers.google.com/appengine/docs/python/datastore/queries#Query_Cursors
        """
        (items, tablename, fields) = self.select_raw(query, fields, attributes)
        # self.db['_lastsql'] = self._select(query,fields,attributes)
        rows = [[(t == self.db[tablename]._id.name and item) or
                 (t == 'nativeRef' and item) or getattr(item, t)
                 for t in fields] for item in items]
        colnames = ['%s.%s' % (tablename, t) for t in fields]
        processor = attributes.get('processor', self.parse)
        return processor(rows, fields, colnames, False)

    def count(self, query, distinct=None, limit=None):
        if distinct:
            raise RuntimeError("COUNT DISTINCT not supported")
        (items, tablename, fields) = self.select_raw(query)
        # self.db['_lastsql'] = self._count(query)
        try:
            return len(items)
        except TypeError:
            # items is a lazy gae.Query, not a list
            return items.count(limit=limit)

    def delete(self, tablename, query):
        """Delete matching records; returns the number deleted.

        Changed on 2010-05-04: per
        http://code.google.com/p/googleappengine/issues/detail?id=3119
        GAE no longer supports deleting more than 1000 records at once,
        so deletion is batched.
        """
        # self.db['_lastsql'] = self._delete(tablename,query)
        (items, tablename, fields) = self.select_raw(query)
        # items can be one item or a query
        if not isinstance(items, list):
            # use a keys_only query to ensure that this runs as a
            # datastore small operation
            leftitems = items.fetch(1000, keys_only=True)
            counter = 0
            while len(leftitems):
                counter += len(leftitems)
                gae.delete(leftitems)
                leftitems = items.fetch(1000, keys_only=True)
        else:
            counter = len(items)
            gae.delete(items)
        return counter

    def update(self, tablename, query, update_fields):
        """Update matching records in place; returns the number updated."""
        # self.db['_lastsql'] = self._update(tablename,query,update_fields)
        (items, tablename, fields) = self.select_raw(query)
        counter = 0
        for item in items:
            for field, value in update_fields:
                setattr(item, field.name, self.represent(value, field.type))
            item.put()
            counter += 1
        LOGGER.info(str(counter))
        return counter

    def insert(self, table, fields):
        """Insert one record; returns a Reference carrying the GAE key."""
        dfields = dict((f.name, self.represent(v, f.type)) for f, v in fields)
        # table._db['_lastsql'] = self._insert(table,fields)
        tmp = table._tableobj(**dfields)
        tmp.put()
        rid = Reference(tmp.key().id())
        (rid._table, rid._record, rid._gaekey) = (table, None, tmp.key())
        return rid

    def bulk_insert(self, table, items):
        """Insert many records in one datastore put(); returns True."""
        parsed_items = []
        for item in items:
            dfields = dict((f.name, self.represent(v, f.type)) for f, v in item)
            parsed_items.append(table._tableobj(**dfields))
        gae.put(parsed_items)
        return True
5009
def uuid2int(uuidv):
    """Return the 128-bit integer value of the UUID string *uuidv*."""
    parsed = uuid.UUID(uuidv)
    return parsed.int
5012
def int2uuid(n):
    """Return the canonical UUID string for the 128-bit integer *n*."""
    value = uuid.UUID(int=n)
    return str(value)
5015
class CouchDBAdapter(NoSQLAdapter):
    """DAL adapter for Apache CouchDB (couchdb:// URIs).

    Queries are compiled into JavaScript map functions (see _select)
    that CouchDB executes as temporary views; no SQL is generated.
    NOTE(review): this code is Python-2 only (uses `long`/`unicode`).
    """
    drivers = ('couchdb',)

    uploads_in_blob = True
    # DAL field type -> Python type used by NoSQLAdapter.represent
    types = {
        'boolean': bool,
        'string': str,
        'text': str,
        'json': str,
        'password': str,
        'blob': str,
        'upload': str,
        'integer': long,
        'bigint': long,
        'float': float,
        'double': float,
        'date': datetime.date,
        'time': datetime.time,
        'datetime': datetime.datetime,
        'id': long,
        'reference': long,
        'list:string': list,
        'list:integer': list,
        'list:reference': list,
        }

    # filesystem-based migration bookkeeping does not apply: no-ops
    def file_exists(self, filename): pass
    def file_open(self, filename, mode='rb', lock=True): pass
    def file_close(self, fileobj): pass

    def expand(self, expression, field_type=None):
        # CouchDB stores the document id as '_id'
        if isinstance(expression, Field):
            if expression.type == 'id':
                return "%s._id" % expression.tablename
        return BaseAdapter.expand(self, expression, field_type)

    # boolean operators are emitted as JavaScript, not SQL
    def AND(self, first, second):
        return '(%s && %s)' % (self.expand(first), self.expand(second))

    def OR(self, first, second):
        return '(%s || %s)' % (self.expand(first), self.expand(second))

    def EQ(self, first, second):
        if second is None:
            return '(%s == null)' % self.expand(first)
        return '(%s == %s)' % (self.expand(first), self.expand(second, first.type))

    def NE(self, first, second):
        if second is None:
            return '(%s != null)' % self.expand(first)
        return '(%s != %s)' % (self.expand(first), self.expand(second, first.type))

    def COMMA(self, first, second):
        return '%s + %s' % (self.expand(first), self.expand(second))

    def represent(self, obj, fieldtype):
        """Serialize *obj* as a JavaScript literal for the map function."""
        value = NoSQLAdapter.represent(self, obj, fieldtype)
        if fieldtype == 'id':
            return repr(str(long(value)))
        elif fieldtype in ('date', 'time', 'datetime', 'boolean'):
            # these types are stored in their JSON form
            return serializers.json(value)
        return repr(not isinstance(value, unicode) and value \
                    or value and value.encode('utf8'))

    def __init__(self, db, uri='couchdb://127.0.0.1:5984',
                 pool_size=0, folder=None, db_codec='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        """Connect to the CouchDB server named in *uri*."""
        self.db = db
        self.uri = uri
        if do_connect: self.find_driver(adapter_args)
        self.dbengine = 'couchdb'
        self.folder = folder
        db['_lastsql'] = ''
        self.db_codec = 'UTF-8'
        self._after_connection = after_connection
        self.pool_size = pool_size

        # strip the 'couchdb://' scheme and talk plain HTTP
        url = 'http://' + uri[10:]
        def connector(url=url, driver_args=driver_args):
            return self.driver.Server(url, **driver_args)
        self.reconnect(connector, cursor=False)

    def create_table(self, table, migrate=True, fake_migrate=False, polymodel=None):
        # best-effort: creating an already-existing database is ignored
        if migrate:
            try:
                self.connection.create(table._tablename)
            except:
                pass

    def insert(self, table, fields):
        """Insert one document; the id is a web2py uuid mapped to an int."""
        id = uuid2int(web2py_uuid())
        ctable = self.connection[table._tablename]
        values = dict((k.name, self.represent(v, k.type)) for k, v in fields)
        values['_id'] = str(id)
        ctable.save(values)
        return id

    def _select(self, query, fields, attributes):
        """Compile *query* into a CouchDB JavaScript map function.

        Returns (fn, colnames) where *fn* is the map function source.
        """
        if not isinstance(query, Query):
            raise SyntaxError("Not Supported")
        for key in set(attributes.keys()) - SELECT_ARGS:
            raise SyntaxError('invalid select attribute: %s' % key)
        new_fields = []
        for item in fields:
            if isinstance(item, SQLALL):
                new_fields += item._table
            else:
                new_fields.append(item)
        # map the DAL 'id' field onto CouchDB's '_id'
        def uid(fd):
            return fd == 'id' and '_id' or fd
        # NOTE(review): 'get' appears unused in this method
        def get(row, fd):
            return fd == 'id' and long(row['_id']) or row.get(fd, None)
        fields = new_fields
        tablename = self.get_table(query)
        fieldnames = [f.name for f in (fields or self.db[tablename])]
        colnames = ['%s.%s' % (tablename, k) for k in fieldnames]
        fields = ','.join(['%s.%s' % (tablename, uid(f)) for f in fieldnames])
        fn = "(function(%(t)s){if(%(query)s)emit(%(order)s,[%(fields)s]);})" % \
            dict(t=tablename,
                 query=self.expand(query),
                 order='%s._id' % tablename,
                 fields=fields)
        return fn, colnames

    def select(self, query, fields, attributes):
        """Execute the compiled map function as a temporary view."""
        if not isinstance(query, Query):
            raise SyntaxError("Not Supported")
        fn, colnames = self._select(query, fields, attributes)
        tablename = colnames[0].split('.')[0]
        ctable = self.connection[tablename]
        rows = [cols['value'] for cols in ctable.query(fn)]
        processor = attributes.get('processor', self.parse)
        return processor(rows, fields, colnames, False)

    def delete(self, tablename, query):
        """Delete matching documents; returns the number deleted."""
        if not isinstance(query, Query):
            raise SyntaxError("Not Supported")
        if query.first.type == 'id' and query.op == self.EQ:
            # fast path: delete a single document by id
            id = query.second
            tablename = query.first.tablename
            assert(tablename == query.first.tablename)
            ctable = self.connection[tablename]
            try:
                del ctable[str(id)]
                return 1
            except couchdb.http.ResourceNotFound:
                return 0
        else:
            # general path: select matching ids, then delete one by one
            tablename = self.get_table(query)
            rows = self.select(query, [self.db[tablename]._id], {})
            ctable = self.connection[tablename]
            for row in rows:
                del ctable[str(row.id)]
            return len(rows)

    def update(self, tablename, query, fields):
        """Update matching documents; returns the number updated."""
        if not isinstance(query, Query):
            raise SyntaxError("Not Supported")
        if query.first.type == 'id' and query.op == self.EQ:
            # fast path: update a single document by id
            id = query.second
            tablename = query.first.tablename
            ctable = self.connection[tablename]
            try:
                doc = ctable[str(id)]
                for key, value in fields:
                    doc[key.name] = self.represent(value, self.db[tablename][key.name].type)
                ctable.save(doc)
                return 1
            except couchdb.http.ResourceNotFound:
                return 0
        else:
            # general path: select matching ids, then update one by one
            tablename = self.get_table(query)
            rows = self.select(query, [self.db[tablename]._id], {})
            ctable = self.connection[tablename]
            table = self.db[tablename]
            for row in rows:
                doc = ctable[str(row.id)]
                for key, value in fields:
                    doc[key.name] = self.represent(value, table[key.name].type)
                ctable.save(doc)
            return len(rows)

    def count(self, query, distinct=None):
        # implemented by materializing the matching ids and counting them
        if distinct:
            raise RuntimeError("COUNT DISTINCT not supported")
        if not isinstance(query, Query):
            raise SyntaxError("Not Supported")
        tablename = self.get_table(query)
        rows = self.select(query, [self.db[tablename]._id], {})
        return len(rows)
5207
def cleanup(text):
    """
    validates that the given text is clean: only contains [0-9a-zA-Z_]
    """
    if REGEX_ALPHANUMERIC.match(text):
        return text
    raise SyntaxError('invalid table or field name: %s' % text)
5215
5216 -class MongoDBAdapter(NoSQLAdapter):
5217 native_json = True 5218 drivers = ('pymongo',) 5219 5220 uploads_in_blob = True 5221 5222 types = { 5223 'boolean': bool, 5224 'string': str, 5225 'text': str, 5226 'json': str, 5227 'password': str, 5228 'blob': str, 5229 'upload': str, 5230 'integer': long, 5231 'bigint': long, 5232 'float': float, 5233 'double': float, 5234 'date': datetime.date, 5235 'time': datetime.time, 5236 'datetime': datetime.datetime, 5237 'id': long, 5238 'reference': long, 5239 'list:string': list, 5240 'list:integer': list, 5241 'list:reference': list, 5242 } 5243 5244 error_messages = {"javascript_needed": "This must yet be replaced" + 5245 " with javascript in order to work."} 5246
    def __init__(self, db, uri='mongodb://127.0.0.1:5984/db',
                 pool_size=0, folder=None, db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        """Connect to the MongoDB database named in *uri*.

        Recognized adapter_args:
        - 'minimumreplication': minimum replicas to wait for on writes
        - 'safe': acknowledge writes synchronously (default True)
        """
        self.db = db
        self.uri = uri
        if do_connect: self.find_driver(adapter_args)
        import random
        from bson.objectid import ObjectId
        from bson.son import SON
        import pymongo.uri_parser

        m = pymongo.uri_parser.parse_uri(uri)

        # keep driver helpers on the instance so other methods can use
        # them without re-importing
        self.SON = SON
        self.ObjectId = ObjectId
        self.random = random

        self.dbengine = 'mongodb'
        self.folder = folder
        db['_lastsql'] = ''
        self.db_codec = 'UTF-8'
        self._after_connection = after_connection
        self.pool_size = pool_size
        # this is the minimum amount of replicates that it should wait
        # for on insert/update
        self.minimumreplication = adapter_args.get('minimumreplication', 0)
        # by default all inserts and selects are performand asynchronous,
        # but now the default is
        # synchronous, except when overruled by either this default or
        # function parameter
        self.safe = adapter_args.get('safe', True)

        # older pymongo parse_uri returned a tuple; normalize to a dict
        if isinstance(m, tuple):
            m = {"database": m[1]}
        if m.get('database') == None:
            raise SyntaxError("Database is required!")

        def connector(uri=self.uri, m=m):
            # Connection() is deprecated
            if hasattr(self.driver, "MongoClient"):
                Connection = self.driver.MongoClient
            else:
                Connection = self.driver.Connection
            return Connection(uri)[m.get('database')]

        self.reconnect(connector, cursor=False)
5295
    def object_id(self, arg=None):
        """ Convert input to a valid Mongodb ObjectId instance

        self.object_id("<random>") -> ObjectId (not unique) instance

        Accepts an existing ObjectId (returned unchanged), a decimal or
        base-16 string, the literal "<random>", or an integer; None/0
        yields the all-zero ObjectId.  Raises ValueError/TypeError on
        anything else.
        """
        if not arg:
            arg = 0
        if isinstance(arg, basestring):
            # we assume an integer as default input
            # a 24-hex-digit string is treated as a raw ObjectId hex value
            rawhex = len(arg.replace("0x", "").replace("L", "")) == 24
            if arg.isdigit() and (not rawhex):
                arg = int(arg)
            elif arg == "<random>":
                # build 24 random hex digits and parse them as an int
                arg = int("0x%sL" % \
                "".join([self.random.choice("0123456789abcdef") \
                for x in range(24)]), 0)
            elif arg.isalnum():
                if not arg.startswith("0x"):
                    arg = "0x%s" % arg
                try:
                    arg = int(arg, 0)
                except ValueError, e:
                    raise ValueError(
                            "invalid objectid argument string: %s" % e)
            else:
                raise ValueError("Invalid objectid argument string. " +
                                 "Requires an integer or base 16 value")
        elif isinstance(arg, self.ObjectId):
            return arg

        if not isinstance(arg, (int, long)):
            raise TypeError("object_id argument must be of type " +
                            "ObjectId or an objectid representable integer")
        if arg == 0:
            hexvalue = "".zfill(24)
        else:
            # strip '0x' prefix and any Python-2 long 'L' suffix
            hexvalue = hex(arg)[2:].replace("L", "")
        return self.ObjectId(hexvalue)
5333
5334 - def parse_reference(self, value, field_type):
5335 # here we have to check for ObjectID before base parse 5336 if isinstance(value, self.ObjectId): 5337 value = long(str(value), 16) 5338 return super(MongoDBAdapter, 5339 self).parse_reference(value, field_type)
5340
5341 - def parse_id(self, value, field_type):
5342 if isinstance(value, self.ObjectId): 5343 value = long(str(value), 16) 5344 return super(MongoDBAdapter, 5345 self).parse_id(value, field_type)
5346
    def represent(self, obj, fieldtype):
        """Convert a Python value into the form stored by MongoDB.

        Dates and times are promoted to datetimes (Mongo has no separate
        date/time types), blobs become bson.Binary, and id/reference
        values become ObjectIds.
        """
        # the base adatpter does not support MongoDB ObjectId
        if isinstance(obj, self.ObjectId):
            value = obj
        else:
            value = NoSQLAdapter.represent(self, obj, fieldtype)
        # reference types must be convert to ObjectID
        if fieldtype == 'date':
            if value == None:
                return value
            # this piece of data can be stripped off based on the fieldtype
            t = datetime.time(0, 0, 0)
            # mongodb doesn't has a date object and so it must datetime,
            # string or integer
            return datetime.datetime.combine(value, t)
        elif fieldtype == 'time':
            if value == None:
                return value
            # this piece of data can be stripped of based on the fieldtype
            d = datetime.date(2000, 1, 1)
            # mongodb doesn't has a time object and so it must datetime,
            # string or integer
            return datetime.datetime.combine(d, value)
        elif fieldtype == "blob":
            from bson import Binary
            if not isinstance(value, Binary):
                return Binary(value)
            return value
        elif (isinstance(fieldtype, basestring) and
              fieldtype.startswith('list:')):
            if fieldtype.startswith('list:reference'):
                newval = []
                for v in value:
                    newval.append(self.object_id(v))
                return newval
            return value
        elif ((isinstance(fieldtype, basestring) and
               fieldtype.startswith("reference")) or
               (isinstance(fieldtype, Table)) or fieldtype == "id"):
            value = self.object_id(value)
        return value
5388
5389 - def create_table(self, table, migrate=True, fake_migrate=False, 5390 polymodel=None, isCapped=False):
5391 if isCapped: 5392 raise RuntimeError("Not implemented")
5393
5394 - def count(self, query, distinct=None, snapshot=True):
5395 if distinct: 5396 raise RuntimeError("COUNT DISTINCT not supported") 5397 if not isinstance(query,Query): 5398 raise SyntaxError("Not Supported") 5399 tablename = self.get_table(query) 5400 return long(self.select(query,[self.db[tablename]._id], {}, 5401 count=True,snapshot=snapshot)['count'])
5402 # Maybe it would be faster if we just implemented the pymongo 5403 # .count() function which is probably quicker? 5404 # therefor call __select() connection[table].find(query).count() 5405 # Since this will probably reduce the return set? 5406
    def expand(self, expression, field_type=None):
        """Translate a DAL expression into pymongo query terms.

        NOTE(review): this method mutates the expression in place
        (renames an 'id' Field to '_id' and wraps second operands in
        ObjectIds), and for a Query the operator may be evaluated twice
        (once in the first branch, once below) — confirm both are
        intentional.
        """
        if isinstance(expression, Query):
            # any query using 'id':=
            #    set name as _id (as per pymongo/mongodb primary key)
            #    convert second arg to an objectid field
            #    (if its not already)
            #    if second arg is 0 convert to objectid
            if isinstance(expression.first,Field) and \
                    ((expression.first.type == 'id') or \
                    ("reference" in expression.first.type)):
                if expression.first.type == 'id':
                    expression.first.name = '_id'
                # cast to Mongo ObjectId
                if isinstance(expression.second, (tuple, list, set)):
                    expression.second = [self.object_id(item) for
                                         item in expression.second]
                else:
                    expression.second = self.object_id(expression.second)
                result = expression.op(expression.first, expression.second)

        if isinstance(expression, Field):
            if expression.type=='id':
                result = "_id"
            else:
                result = expression.name
        elif isinstance(expression, (Expression, Query)):
            # dispatch to the operator method with whichever operands
            # are present
            if not expression.second is None:
                result = expression.op(expression.first, expression.second)
            elif not expression.first is None:
                result = expression.op(expression.first)
            elif not isinstance(expression.op, str):
                result = expression.op()
            else:
                result = expression.op
        elif field_type:
            result = self.represent(expression,field_type)
        elif isinstance(expression,(list,tuple)):
            result = ','.join(self.represent(item,field_type) for
                              item in expression)
        else:
            result = expression
        return result
5449
5450 - def drop(self, table, mode=''):
5451 ctable = self.connection[table._tablename] 5452 ctable.drop()
5453
5454 - def truncate(self, table, mode, safe=None):
5455 if safe == None: 5456 safe=self.safe 5457 ctable = self.connection[table._tablename] 5458 ctable.remove(None, safe=True)
5459
    def _select(self, query, fields, attributes):
        """Compile a DAL select into pymongo find() arguments.

        Returns (tablename, query dict, field-projection SON, sort list,
        limit, skip).  Only 'limitby', 'orderby' and 'for_update' are
        recognized attributes; others are warned about and ignored.
        """
        if 'for_update' in attributes:
            logging.warn('mongodb does not support for_update')
        for key in set(attributes.keys())-set(('limitby',
                                               'orderby','for_update')):
            if attributes[key]!=None:
                logging.warn('select attribute not implemented: %s' % key)

        new_fields=[]
        mongosort_list = []

        # try an orderby attribute
        orderby = attributes.get('orderby', False)
        limitby = attributes.get('limitby', False)
        # distinct = attributes.get('distinct', False)
        if orderby:
            if isinstance(orderby, (list, tuple)):
                orderby = xorify(orderby)

            # !!!! need to add 'random'
            # a leading '-' means descending order (pymongo: -1)
            for f in self.expand(orderby).split(','):
                if f.startswith('-'):
                    mongosort_list.append((f[1:], -1))
                else:
                    mongosort_list.append((f, 1))
        if limitby:
            limitby_skip, limitby_limit = limitby[0], int(limitby[1])
        else:
            limitby_skip = limitby_limit = 0

        mongofields_dict = self.SON()
        mongoqry_dict = {}
        # flatten SQLALL (table.*) entries into individual fields
        for item in fields:
            if isinstance(item, SQLALL):
                new_fields += item._table
            else:
                new_fields.append(item)
        fields = new_fields
        if isinstance(query,Query):
            tablename = self.get_table(query)
        elif len(fields) != 0:
            tablename = fields[0].tablename
        else:
            raise SyntaxError("The table name could not be found in " +
                              "the query nor from the select statement.")
        mongoqry_dict = self.expand(query)
        fields = fields or self.db[tablename]
        for field in fields:
            mongofields_dict[field.name] = 1

        return tablename, mongoqry_dict, mongofields_dict, mongosort_list, \
            limitby_limit, limitby_skip
5512
    def select(self, query, fields, attributes, count=False,
               snapshot=False):
        """Run a select against MongoDB and parse results into DAL rows.

        count: when True, return ``{'count': n}`` instead of rows.
        snapshot: passed through to pymongo's ``find()``.
        """
        # TODO: support joins
        tablename, mongoqry_dict, mongofields_dict, mongosort_list, \
            limitby_limit, limitby_skip = self._select(query, fields, attributes)
        ctable = self.connection[tablename]

        if count:
            return {'count' : ctable.find(
                    mongoqry_dict, mongofields_dict,
                    skip=limitby_skip, limit=limitby_limit,
                    sort=mongosort_list, snapshot=snapshot).count()}
        else:
            # pymongo cursor object
            mongo_list_dicts = ctable.find(mongoqry_dict,
                                mongofields_dict, skip=limitby_skip,
                                limit=limitby_limit, sort=mongosort_list,
                                snapshot=snapshot)
            rows = []
            # populate row in proper order
            # Here we replace ._id with .id to follow the standard naming
            # NOTE(review): this mutates field.name in place on the passed
            # Field objects -- confirm callers do not reuse them.
            colnames = []
            newnames = []
            for field in fields:
                colname = str(field)
                colnames.append(colname)
                tablename, fieldname = colname.split(".")
                if fieldname == "_id":
                    # Mongodb reserved uuid key
                    field.name = "id"
                newnames.append(".".join((tablename, field.name)))

            for record in mongo_list_dicts:
                row=[]
                for colname in colnames:
                    tablename, fieldname = colname.split(".")
                    # switch to Mongo _id uuids for retrieving
                    # record id's
                    if fieldname == "id": fieldname = "_id"
                    # missing keys (unset document fields) become None
                    if fieldname in record:
                        value = record[fieldname]
                    else:
                        value = None
                    row.append(value)
                rows.append(row)

            processor = attributes.get('processor', self.parse)
            result = processor(rows, fields, newnames, False)
            return result
5562
5563 - def _insert(self, table, fields):
5564 values = dict() 5565 for k, v in fields: 5566 if not k.name in ["id", "safe"]: 5567 fieldname = k.name 5568 fieldtype = table[k.name].type 5569 values[fieldname] = self.represent(v, fieldtype) 5570 return values
5571 5572 # Safe determines whether a asynchronious request is done or a 5573 # synchronious action is done 5574 # For safety, we use by default synchronous requests
5575 - def insert(self, table, fields, safe=None):
5576 if safe==None: 5577 safe = self.safe 5578 ctable = self.connection[table._tablename] 5579 values = self._insert(table, fields) 5580 ctable.insert(values, safe=safe) 5581 return long(str(values['_id']), 16)
5582 5583 #this function returns a dict with the where clause and update fields
5584 - def _update(self, tablename, query, fields):
5585 if not isinstance(query, Query): 5586 raise SyntaxError("Not Supported") 5587 filter = None 5588 if query: 5589 filter = self.expand(query) 5590 # do not try to update id fields to avoid backend errors 5591 modify = {'$set': dict((k.name, self.represent(v, k.type)) for 5592 k, v in fields if (not k.name in ("_id", "id")))} 5593 return modify, filter
5594
5595 - def update(self, tablename, query, fields, safe=None):
5596 if safe == None: 5597 safe = self.safe 5598 # return amount of adjusted rows or zero, but no exceptions 5599 # @ related not finding the result 5600 if not isinstance(query, Query): 5601 raise RuntimeError("Not implemented") 5602 amount = self.count(query, False) 5603 modify, filter = self._update(tablename, query, fields) 5604 try: 5605 result = self.connection[tablename].update(filter, 5606 modify, multi=True, safe=safe) 5607 if safe: 5608 try: 5609 # if result count is available fetch it 5610 return result["n"] 5611 except (KeyError, AttributeError, TypeError): 5612 return amount 5613 else: 5614 return amount 5615 except Exception, e: 5616 # TODO Reverse update query to verifiy that the query succeded 5617 raise RuntimeError("uncaught exception when updating rows: %s" % e)
5618
5619 - def _delete(self, tablename, query):
5620 if not isinstance(query, Query): 5621 raise RuntimeError("query type %s is not supported" % \ 5622 type(query)) 5623 return self.expand(query)
5624
5625 - def delete(self, tablename, query, safe=None):
5626 if safe is None: 5627 safe = self.safe 5628 amount = 0 5629 amount = self.count(query, False) 5630 filter = self._delete(tablename, query) 5631 self.connection[tablename].remove(filter, safe=safe) 5632 return amount
5633
5634 - def bulk_insert(self, table, items):
5635 return [self.insert(table,item) for item in items]
5636 5637 ## OPERATORS
5638 - def INVERT(self, first):
5639 #print "in invert first=%s" % first 5640 return '-%s' % self.expand(first)
5641 5642 # TODO This will probably not work:(
5643 - def NOT(self, first):
5644 result = {} 5645 result["$not"] = self.expand(first) 5646 return result
5647
5648 - def AND(self,first,second):
5649 f = self.expand(first) 5650 s = self.expand(second) 5651 f.update(s) 5652 return f
5653
5654 - def OR(self,first,second):
5655 # pymongo expects: .find({'$or': [{'name':'1'}, {'name':'2'}]}) 5656 result = {} 5657 f = self.expand(first) 5658 s = self.expand(second) 5659 result['$or'] = [f,s] 5660 return result
5661
5662 - def BELONGS(self, first, second):
5663 if isinstance(second, str): 5664 return {self.expand(first) : {"$in" : [ second[:-1]]} } 5665 elif second==[] or second==() or second==set(): 5666 return {1:0} 5667 items = [self.expand(item, first.type) for item in second] 5668 return {self.expand(first) : {"$in" : items} }
5669
5670 - def EQ(self,first,second=None):
5671 result = {} 5672 result[self.expand(first)] = self.expand(second) 5673 return result
5674
5675 - def NE(self, first, second=None):
5676 result = {} 5677 result[self.expand(first)] = {'$ne': self.expand(second)} 5678 return result
5679
5680 - def LT(self,first,second=None):
5681 if second is None: 5682 raise RuntimeError("Cannot compare %s < None" % first) 5683 result = {} 5684 result[self.expand(first)] = {'$lt': self.expand(second)} 5685 return result
5686
5687 - def LE(self,first,second=None):
5688 if second is None: 5689 raise RuntimeError("Cannot compare %s <= None" % first) 5690 result = {} 5691 result[self.expand(first)] = {'$lte': self.expand(second)} 5692 return result
5693
5694 - def GT(self,first,second):
5695 result = {} 5696 result[self.expand(first)] = {'$gt': self.expand(second)} 5697 return result
5698
5699 - def GE(self,first,second=None):
5700 if second is None: 5701 raise RuntimeError("Cannot compare %s >= None" % first) 5702 result = {} 5703 result[self.expand(first)] = {'$gte': self.expand(second)} 5704 return result
5705
5706 - def ADD(self, first, second):
5707 raise NotImplementedError(self.error_messages["javascript_needed"]) 5708 return '%s + %s' % (self.expand(first), 5709 self.expand(second, first.type))
5710
5711 - def SUB(self, first, second):
5712 raise NotImplementedError(self.error_messages["javascript_needed"]) 5713 return '(%s - %s)' % (self.expand(first), 5714 self.expand(second, first.type))
5715
5716 - def MUL(self, first, second):
5717 raise NotImplementedError(self.error_messages["javascript_needed"]) 5718 return '(%s * %s)' % (self.expand(first), 5719 self.expand(second, first.type))
5720
5721 - def DIV(self, first, second):
5722 raise NotImplementedError(self.error_messages["javascript_needed"]) 5723 return '(%s / %s)' % (self.expand(first), 5724 self.expand(second, first.type))
5725
5726 - def MOD(self, first, second):
5727 raise NotImplementedError(self.error_messages["javascript_needed"]) 5728 return '(%s %% %s)' % (self.expand(first), 5729 self.expand(second, first.type))
5730
5731 - def AS(self, first, second):
5732 raise NotImplementedError(self.error_messages["javascript_needed"]) 5733 return '%s AS %s' % (self.expand(first), second)
5734 5735 # We could implement an option that simulates a full featured SQL 5736 # database. But I think the option should be set explicit or 5737 # implemented as another library.
5738 - def ON(self, first, second):
5739 raise NotImplementedError("This is not possible in NoSQL" + 5740 " but can be simulated with a wrapper.") 5741 return '%s ON %s' % (self.expand(first), self.expand(second))
5742 5743 # BLOW ARE TWO IMPLEMENTATIONS OF THE SAME FUNCITONS 5744 # WHICH ONE IS BEST? 5745
5746 - def COMMA(self, first, second):
5747 return '%s, %s' % (self.expand(first), self.expand(second))
5748
    # NOTE(review): this LIKE (and the STARTSWITH/ENDSWITH/CONTAINS just
    # below) is dead code -- a second definition of the same name later in
    # the class shadows it at class-creation time.
    def LIKE(self, first, second):
        #escaping regex operators?
        return {self.expand(first): ('%s' % \
                self.expand(second, 'string').replace('%','/'))}
5753
    # NOTE(review): shadowed by a later STARTSWITH definition; dead code.
    def STARTSWITH(self, first, second):
        #escaping regex operators?
        return {self.expand(first): ('/^%s/' % \
                self.expand(second, 'string'))}
5758
5759 - def ENDSWITH(self, first, second):
5760 #escaping regex operators? 5761 return {self.expand(first): ('/%s^/' % \ 5762 self.expand(second, 'string'))}
5763
    # NOTE(review): shadowed by the later CONTAINS definition; dead code.
    # Unlike the surviving version, this one special-cases ObjectId values.
    def CONTAINS(self, first, second, case_sensitive=False):
        # silently ignore, only case sensitive
        # There is a technical difference, but mongodb doesn't support
        # that, but the result will be the same
        val = second if isinstance(second,self.ObjectId) else \
            {'$regex':".*" + re.escape(self.expand(second, 'string')) + ".*"}
        return {self.expand(first) : val}
5771
5772 - def LIKE(self, first, second):
5773 import re 5774 return {self.expand(first): {'$regex': \ 5775 re.escape(self.expand(second, 5776 'string')).replace('%','.*')}}
5777 5778 #TODO verify full compatibilty with official SQL Like operator
5779 - def STARTSWITH(self, first, second):
5780 #TODO Solve almost the same problem as with endswith 5781 import re 5782 return {self.expand(first): {'$regex' : '^' + 5783 re.escape(self.expand(second, 5784 'string'))}}
5785 5786 #TODO verify full compatibilty with official SQL Like operator
5787 - def ENDSWITH(self, first, second):
5788 #escaping regex operators? 5789 #TODO if searched for a name like zsa_corbitt and the function 5790 # is endswith('a') then this is also returned. 5791 # Aldo it end with a t 5792 import re 5793 return {self.expand(first): {'$regex': \ 5794 re.escape(self.expand(second, 'string')) + '$'}}
5795 5796 #TODO verify full compatibilty with official oracle contains operator
5797 - def CONTAINS(self, first, second, case_sensitive=False):
5798 # silently ignore, only case sensitive 5799 #There is a technical difference, but mongodb doesn't support 5800 # that, but the result will be the same 5801 #TODO contains operators need to be transformed to Regex 5802 return {self.expand(first) : {'$regex': \ 5803 ".*" + re.escape(self.expand(second, 'string')) + ".*"}}
5804
class IMAPAdapter(NoSQLAdapter):
    # driver module(s) this adapter can use
    drivers = ('imaplib',)

    # NOTE(review): because it follows the ``drivers`` assignment, this
    # string is NOT the class docstring; kept in place to preserve layout.
    """ IMAP server adapter

    This class is intended as an interface with
    email IMAP servers to perform simple queries in the
    web2py DAL query syntax, so email read, search and
    other related IMAP mail services (as those implemented
    by brands like Google(r), and Yahoo!(r))
    can be managed from web2py applications.

    The code uses examples by Yuji Tomita on this post:
    http://yuji.wordpress.com/2011/06/22/python-imaplib-imap-example-with-gmail/#comment-1137
    and is based in docs for Python imaplib, python email
    and email IETF's (i.e. RFC2060 and RFC3501)

    This adapter was tested with a small set of operations with Gmail(r). Other
    services requests could raise command syntax and response data issues.

    It creates its table and field names "statically",
    meaning that the developer should leave the table and field
    definitions to the DAL instance by calling the adapter's
    .define_tables() method. The tables are defined with the
    IMAP server mailbox list information.

    .define_tables() returns a dictionary mapping dal tablenames
    to the server mailbox names with the following structure:

    {<tablename>: str <server mailbox name>}

    Here is a list of supported fields:

    Field       Type           Description
    ################################################################
    uid         string
    answered    boolean        Flag
    created     date
    content     list:string    A list of dict text or html parts
    to          string
    cc          string
    bcc         string
    size        integer        the amount of octets of the message*
    deleted     boolean        Flag
    draft       boolean        Flag
    flagged     boolean        Flag
    sender      string
    recent      boolean        Flag
    seen        boolean        Flag
    subject     string
    mime        string         The mime header declaration
    email       string         The complete RFC822 message**
    attachments <type list>    Each non text part as dict
    encoding    string         The main detected encoding

    *At the application side it is measured as the length of the RFC822
    message string

    WARNING: As row id's are mapped to email sequence numbers,
    make sure your imap client web2py app does not delete messages
    during select or update actions, to prevent
    updating or deleting different messages.
    Sequence numbers change whenever the mailbox is updated.
    To avoid this sequence numbers issues, it is recommended the use
    of uid fields in query references (although the update and delete
    in separate actions rule still applies).

    # This is the code recommended to start imap support
    # at the app's model:

    imapdb = DAL("imap://user:password@server:port", pool_size=1) # port 993 for ssl
    imapdb.define_tables()

    Here is an (incomplete) list of possible imap commands:

    # Count today's unseen messages
    # smaller than 6000 octets from the
    # inbox mailbox

    q = imapdb.INBOX.seen == False
    q &= imapdb.INBOX.created == datetime.date.today()
    q &= imapdb.INBOX.size < 6000
    unread = imapdb(q).count()

    # Fetch last query messages
    rows = imapdb(q).select()

    # it is also possible to filter query select results with limitby and
    # sequences of mailbox fields

    set.select(<fields sequence>, limitby=(<int>, <int>))

    # Mark last query messages as seen
    messages = [row.uid for row in rows]
    seen = imapdb(imapdb.INBOX.uid.belongs(messages)).update(seen=True)

    # Delete messages in the imap database that have mails from mr. Gumby

    deleted = 0
    for mailbox in imapdb.tables:
        deleted += imapdb(imapdb[mailbox].sender.contains("gumby")).delete()

    # It is possible also to mark messages for deletion instead of erasing them
    # directly with set.update(deleted=True)


    # This object gives access
    # to the adapter auto mailbox
    # mapped names (which native
    # mailbox has what table name)

    imapdb.mailboxes <dict> # tablename, server native name pairs

    # To retrieve a table native mailbox name use:
    imapdb.<table>.mailbox

    ### New features v2.4.1:

    # Declare mailboxes statically with tablename, name pairs
    # This avoids the extra server names retrieval

    imapdb.define_tables({"inbox": "INBOX"})

    # Selects without content/attachments/email columns will only
    # fetch header and flags

    imapdb(q).select(imapdb.INBOX.sender, imapdb.INBOX.subject)
    """

    # DAL field type -> Python type used when defining the mailbox tables
    types = {
                'string': str,
                'text': str,
                'date': datetime.date,
                'datetime': datetime.datetime,
                'id': long,
                'boolean': bool,
                'integer': int,
                'bigint': long,
                'blob': str,
                'list:string': str,
        }

    dbengine = 'imap'

    # parses "user[:password]@host[:port]" (the part after "imap://")
    REGEX_URI = re.compile('^(?P<user>[^:]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:@]+)(\:(?P<port>[0-9]+))?$')
    def __init__(self,
                 db,
                 uri,
                 pool_size=0,
                 folder=None,
                 db_codec ='UTF-8',
                 credential_decoder=IDENTITY,
                 driver_args={},
                 adapter_args={},
                 do_connect=True,
                 after_connection=None):
        """Parse the imap:// uri, build the connector closure and
        (optionally) connect.

        NOTE(review): ``driver_args`` is a mutable default argument and is
        updated in place below with the parsed credentials -- confirm no
        caller shares it across adapters.
        """
        # db uri: user@example.com:password@imap.server.com:123
        # TODO: max size adapter argument for preventing large mail transfers

        self.db = db
        self.uri = uri
        if do_connect: self.find_driver(adapter_args)
        self.pool_size=pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.credential_decoder = credential_decoder
        self.driver_args = driver_args
        self.adapter_args = adapter_args
        self.mailbox_size = None
        self.static_names = None
        self.charset = sys.getfilesystemencoding()
        # imap class
        self.imap4 = None
        # strip the "imap://" scheme prefix before matching REGEX_URI
        uri = uri.split("://")[1]

        """ MESSAGE is an identifier for sequence number"""

        self.flags = {'deleted': '\\Deleted', 'draft': '\\Draft',
                      'flagged': '\\Flagged', 'recent': '\\Recent',
                      'seen': '\\Seen', 'answered': '\\Answered'}
        # DAL field name -> IMAP SEARCH keyword (None means not searchable)
        self.search_fields = {
            'id': 'MESSAGE', 'created': 'DATE',
            'uid': 'UID', 'sender': 'FROM',
            'to': 'TO', 'cc': 'CC',
            'bcc': 'BCC', 'content': 'TEXT',
            'size': 'SIZE', 'deleted': '\\Deleted',
            'draft': '\\Draft', 'flagged': '\\Flagged',
            'recent': '\\Recent', 'seen': '\\Seen',
            'subject': 'SUBJECT', 'answered': '\\Answered',
            'mime': None, 'email': None,
            'attachments': None
            }

        db['_lastsql'] = ''

        m = self.REGEX_URI.match(uri)
        user = m.group('user')
        password = m.group('password')
        host = m.group('host')
        port = int(m.group('port'))
        # port 993 is the conventional IMAP-over-SSL port
        over_ssl = False
        if port==993:
            over_ssl = True

        driver_args.update(host=host,port=port, password=password, user=user)
        def connector(driver_args=driver_args):
            # it is assumed successful authentication always
            # TODO: support direct connection and login tests
            if over_ssl:
                self.imap4 = self.driver.IMAP4_SSL
            else:
                self.imap4 = self.driver.IMAP4
            connection = self.imap4(driver_args["host"], driver_args["port"])
            data = connection.login(driver_args["user"], driver_args["password"])

            # static mailbox list
            connection.mailbox_names = None

            # dummy cursor function (imaplib has no cursor concept; the
            # connection pool expects one)
            connection.cursor = lambda : True

            return connection

        self.db.define_tables = self.define_tables
        self.connector = connector
        if do_connect: self.reconnect()
6035
    def reconnect(self, f=None, cursor=True):
        """
        IMAP4 Pool connection method

        imap connection lacks of self cursor command.
        A custom command should be provided as a replacement
        for connection pooling to prevent uncaught remote session
        closing

        f: connection factory; defaults to the connector built in __init__.
        cursor: when falsy, skip creating the dummy cursor.
        """
        # already connected: nothing to do
        if getattr(self,'connection',None) != None:
            return
        if f is None:
            f = self.connector

        if not self.pool_size:
            # pooling disabled: connect directly
            self.connection = f()
            self.cursor = cursor and self.connection.cursor()
        else:
            POOLS = ConnectionPool.POOLS
            uri = self.uri
            while True:
                GLOBAL_LOCKER.acquire()
                if not uri in POOLS:
                    POOLS[uri] = []
                if POOLS[uri]:
                    # reuse a pooled connection; lock released before the
                    # (possibly slow) liveness check
                    self.connection = POOLS[uri].pop()
                    GLOBAL_LOCKER.release()
                    self.cursor = cursor and self.connection.cursor()
                    if self.cursor and self.check_active_connection:
                        try:
                            # check if connection is alive or close it
                            result, data = self.connection.list()
                        except:
                            # Possible connection reset error
                            # TODO: read exception class
                            self.connection = f()
                    break
                else:
                    # empty pool: release the lock and open a new connection
                    GLOBAL_LOCKER.release()
                    self.connection = f()
                    self.cursor = cursor and self.connection.cursor()
                    break
        self.after_connection_hook()
6080
6081 - def get_last_message(self, tablename):
6082 last_message = None 6083 # request mailbox list to the server 6084 # if needed 6085 if not isinstance(self.connection.mailbox_names, dict): 6086 self.get_mailboxes() 6087 try: 6088 result = self.connection.select(self.connection.mailbox_names[tablename]) 6089 last_message = int(result[1][0]) 6090 except (IndexError, ValueError, TypeError, KeyError): 6091 e = sys.exc_info()[1] 6092 LOGGER.debug("Error retrieving the last mailbox sequence number. %s" % str(e)) 6093 return last_message
6094
6095 - def get_uid_bounds(self, tablename):
6096 if not isinstance(self.connection.mailbox_names, dict): 6097 self.get_mailboxes() 6098 # fetch first and last messages 6099 # return (first, last) messages uid's 6100 last_message = self.get_last_message(tablename) 6101 result, data = self.connection.uid("search", None, "(ALL)") 6102 uid_list = data[0].strip().split() 6103 if len(uid_list) <= 0: 6104 return None 6105 else: 6106 return (uid_list[0], uid_list[-1])
6107
6108 - def convert_date(self, date, add=None):
6109 if add is None: 6110 add = datetime.timedelta() 6111 """ Convert a date object to a string 6112 with d-Mon-Y style for IMAP or the inverse 6113 case 6114 6115 add <timedelta> adds to the date object 6116 """ 6117 months = [None, "JAN","FEB","MAR","APR","MAY","JUN", 6118 "JUL", "AUG","SEP","OCT","NOV","DEC"] 6119 if isinstance(date, basestring): 6120 # Prevent unexpected date response format 6121 try: 6122 dayname, datestring = date.split(",") 6123 date_list = datestring.strip().split() 6124 year = int(date_list[2]) 6125 month = months.index(date_list[1].upper()) 6126 day = int(date_list[0]) 6127 hms = map(int, date_list[3].split(":")) 6128 return datetime.datetime(year, month, day, 6129 hms[0], hms[1], hms[2]) + add 6130 except (ValueError, AttributeError, IndexError), e: 6131 LOGGER.error("Could not parse date text: %s. %s" % 6132 (date, e)) 6133 return None 6134 elif isinstance(date, (datetime.datetime, datetime.date)): 6135 return (date + add).strftime("%d-%b-%Y") 6136 else: 6137 return None
6138 6139 @staticmethod
6140 - def header_represent(f, r):
6141 from email.header import decode_header 6142 text, encoding = decode_header(f)[0] 6143 if encoding: 6144 text = text.decode(encoding).encode('utf-8') 6145 return text
6146
6147 - def encode_text(self, text, charset, errors="replace"):
6148 """ convert text for mail to unicode""" 6149 if text is None: 6150 text = "" 6151 else: 6152 if isinstance(text, str): 6153 if charset is None: 6154 text = unicode(text, "utf-8", errors) 6155 else: 6156 text = unicode(text, charset, errors) 6157 else: 6158 raise Exception("Unsupported mail text type %s" % type(text)) 6159 return text.encode("utf-8")
6160
6161 - def get_charset(self, message):
6162 charset = message.get_content_charset() 6163 return charset
6164
6165 - def get_mailboxes(self):
6166 """ Query the mail database for mailbox names """ 6167 if self.static_names: 6168 # statically defined mailbox names 6169 self.connection.mailbox_names = self.static_names 6170 return self.static_names.keys() 6171 6172 mailboxes_list = self.connection.list() 6173 self.connection.mailbox_names = dict() 6174 mailboxes = list() 6175 x = 0 6176 for item in mailboxes_list[1]: 6177 x = x + 1 6178 item = item.strip() 6179 if not "NOSELECT" in item.upper(): 6180 sub_items = item.split("\"") 6181 sub_items = [sub_item for sub_item in sub_items \ 6182 if len(sub_item.strip()) > 0] 6183 # mailbox = sub_items[len(sub_items) -1] 6184 mailbox = sub_items[-1] 6185 # remove unwanted characters and store original names 6186 # Don't allow leading non alphabetic characters 6187 mailbox_name = re.sub('^[_0-9]*', '', re.sub('[^_\w]','',re.sub('[/ ]','_',mailbox))) 6188 mailboxes.append(mailbox_name) 6189 self.connection.mailbox_names[mailbox_name] = mailbox 6190 6191 return mailboxes
6192
6193 - def get_query_mailbox(self, query):
6194 nofield = True 6195 tablename = None 6196 attr = query 6197 while nofield: 6198 if hasattr(attr, "first"): 6199 attr = attr.first 6200 if isinstance(attr, Field): 6201 return attr.tablename 6202 elif isinstance(attr, Query): 6203 pass 6204 else: 6205 return None 6206 else: 6207 return None 6208 return tablename
6209
6210 - def is_flag(self, flag):
6211 if self.search_fields.get(flag, None) in self.flags.values(): 6212 return True 6213 else: 6214 return False
6215
    def define_tables(self, mailbox_names=None):
        """
        Auto create common IMAP fields

        This function creates fields definitions "statically"
        meaning that custom fields as in other adapters should
        not be supported and definitions handled on a service/mode
        basis (local syntax for Gmail(r), Ymail(r))

        mailbox_names: optional {tablename: native mailbox name} dict to
        skip the server mailbox-list retrieval.

        Returns a dictionary with tablename, server native mailbox name
        pairs.
        """
        if mailbox_names:
            # optional statically declared mailboxes
            self.static_names = mailbox_names
        else:
            self.static_names = None
        if not isinstance(self.connection.mailbox_names, dict):
            self.get_mailboxes()

        names = self.connection.mailbox_names.keys()

        # one table per mailbox, all with the same static field set
        for name in names:
            self.db.define_table("%s" % name,
                            Field("uid", "string", writable=False),
                            Field("answered", "boolean"),
                            Field("created", "datetime", writable=False),
                            Field("content", list, writable=False),
                            Field("to", "string", writable=False),
                            Field("cc", "string", writable=False),
                            Field("bcc", "string", writable=False),
                            Field("size", "integer", writable=False),
                            Field("deleted", "boolean"),
                            Field("draft", "boolean"),
                            Field("flagged", "boolean"),
                            Field("sender", "string", writable=False),
                            Field("recent", "boolean", writable=False),
                            Field("seen", "boolean"),
                            Field("subject", "string", writable=False),
                            Field("mime", "string", writable=False),
                            Field("email", "string", writable=False, readable=False),
                            Field("attachments", list, writable=False, readable=False),
                            Field("encoding", writable=False)
                            )

            # Set a special _mailbox attribute for storing
            # native mailbox names
            self.db[name].mailbox = \
                self.connection.mailbox_names[name]

            # decode quoted printable
            self.db[name].to.represent = self.db[name].cc.represent = \
                self.db[name].bcc.represent = self.db[name].sender.represent = \
                self.db[name].subject.represent = self.header_represent

        # Set the db instance mailbox collections
        self.db.mailboxes = self.connection.mailbox_names
        return self.db.mailboxes
6275 - def create_table(self, *args, **kwargs):
6276 # not implemented 6277 # but required by DAL 6278 pass
6279
6280 - def _select(self, query, fields, attributes):
6281 if use_common_filters(query): 6282 query = self.common_filter(query, [self.get_query_mailbox(query),]) 6283 return str(query)
6284
    def select(self, query, fields, attributes):
        """ Search and Fetch records and return web2py rows

        Performs an IMAP SEARCH for the query, FETCHes each matching
        message (headers only unless body fields are requested) and maps
        message parts/flags onto the adapter's static field set.
        """
        # move this statement elsewhere (upper-level)
        if use_common_filters(query):
            query = self.common_filter(query, [self.get_query_mailbox(query),])

        import email
        # get records from imap server with search + fetch
        # convert results to a dictionary
        tablename = None
        fetch_results = list()

        if isinstance(query, Query):
            tablename = self.get_table(query)
            mailbox = self.connection.mailbox_names.get(tablename, None)
            if mailbox is None:
                raise ValueError("Mailbox name not found: %s" % mailbox)
            else:
                # select with readonly
                result, selected = self.connection.select(mailbox, True)
                if result != "OK":
                    raise Exception("IMAP error: %s" % selected)
                self.mailbox_size = int(selected[0])
                search_query = "(%s)" % str(query).strip()
                search_result = self.connection.uid("search", None, search_query)
                # Normal IMAP response OK is assumed (change this)
                if search_result[0] == "OK":
                    # For "light" remote server responses just get the first
                    # ten records (change for non-experimental implementation)
                    # However, light responses are not guaranteed with this
                    # approach, just fewer messages.
                    limitby = attributes.get('limitby', None)
                    messages_set = search_result[1][0].split()
                    # descending order
                    messages_set.reverse()
                    if limitby is not None:
                        # TODO: orderby, asc/desc, limitby from complete message set
                        messages_set = messages_set[int(limitby[0]):int(limitby[1])]

                    # keep the requests small for header/flags
                    if any([(field.name in ["content", "size",
                                            "attachments", "email"]) for
                           field in fields]):
                        imap_fields = "(RFC822 FLAGS)"
                    else:
                        imap_fields = "(RFC822.HEADER FLAGS)"

                    if len(messages_set) > 0:
                        # create fetch results object list
                        # fetch each remote message and store it in memmory
                        # (change to multi-fetch command syntax for faster
                        # transactions)
                        for uid in messages_set:
                            # fetch the RFC822 message body
                            typ, data = self.connection.uid("fetch", uid, imap_fields)
                            if typ == "OK":
                                fr = {"message": int(data[0][0].split()[0]),
                                      "uid": long(uid),
                                      "email": email.message_from_string(data[0][1]),
                                      "raw_message": data[0][1]}
                                fr["multipart"] = fr["email"].is_multipart()
                                # fetch flags for the message
                                fr["flags"] = self.driver.ParseFlags(data[1])
                                fetch_results.append(fr)
                            else:
                                # error retrieving the message body
                                raise Exception("IMAP error retrieving the body: %s" % data)
                else:
                    raise Exception("IMAP search error: %s" % search_result[1])
        elif isinstance(query, (Expression, basestring)):
            raise NotImplementedError()
        else:
            raise TypeError("Unexpected query type")

        imapqry_dict = {}
        imapfields_dict = {}

        # SQLALL or no fields at all means "all supported fields"
        if len(fields) == 1 and isinstance(fields[0], SQLALL):
            allfields = True
        elif len(fields) == 0:
            allfields = True
        else:
            allfields = False
        if allfields:
            colnames = ["%s.%s" % (tablename, field) for field in self.search_fields.keys()]
        else:
            colnames = ["%s.%s" % (tablename, field.name) for field in fields]

        for k in colnames:
            imapfields_dict[k] = k

        imapqry_list = list()
        imapqry_array = list()
        for fr in fetch_results:
            attachments = []
            content = []
            size = 0
            n = int(fr["message"])
            item_dict = dict()
            message = fr["email"]
            uid = fr["uid"]
            charset = self.get_charset(message)
            flags = fr["flags"]
            raw_message = fr["raw_message"]
            # Return messages data mapping static fields
            # and fetched results. Mapping should be made
            # outside the select function (with auxiliary
            # instance methods)

            # pending: search flags states trough the email message
            # instances for correct output

            # preserve subject encoding (ASCII/quoted printable)

            if "%s.id" % tablename in colnames:
                item_dict["%s.id" % tablename] = n
            if "%s.created" % tablename in colnames:
                item_dict["%s.created" % tablename] = self.convert_date(message["Date"])
            if "%s.uid" % tablename in colnames:
                item_dict["%s.uid" % tablename] = uid
            if "%s.sender" % tablename in colnames:
                # If there is no encoding found in the message header
                # force utf-8 replacing characters (change this to
                # module's defaults). Applies to .sender, .to, .cc and .bcc fields
                item_dict["%s.sender" % tablename] = message["From"]
            if "%s.to" % tablename in colnames:
                item_dict["%s.to" % tablename] = message["To"]
            if "%s.cc" % tablename in colnames:
                if "Cc" in message.keys():
                    item_dict["%s.cc" % tablename] = message["Cc"]
                else:
                    item_dict["%s.cc" % tablename] = ""
            if "%s.bcc" % tablename in colnames:
                if "Bcc" in message.keys():
                    item_dict["%s.bcc" % tablename] = message["Bcc"]
                else:
                    item_dict["%s.bcc" % tablename] = ""
            if "%s.deleted" % tablename in colnames:
                item_dict["%s.deleted" % tablename] = "\\Deleted" in flags
            if "%s.draft" % tablename in colnames:
                item_dict["%s.draft" % tablename] = "\\Draft" in flags
            if "%s.flagged" % tablename in colnames:
                item_dict["%s.flagged" % tablename] = "\\Flagged" in flags
            if "%s.recent" % tablename in colnames:
                item_dict["%s.recent" % tablename] = "\\Recent" in flags
            if "%s.seen" % tablename in colnames:
                item_dict["%s.seen" % tablename] = "\\Seen" in flags
            if "%s.subject" % tablename in colnames:
                item_dict["%s.subject" % tablename] = message["Subject"]
            if "%s.answered" % tablename in colnames:
                item_dict["%s.answered" % tablename] = "\\Answered" in flags
            if "%s.mime" % tablename in colnames:
                item_dict["%s.mime" % tablename] = message.get_content_type()
            if "%s.encoding" % tablename in colnames:
                item_dict["%s.encoding" % tablename] = charset

            # Here goes the whole RFC822 body as an email instance
            # for controller side custom processing
            # The message is stored as a raw string
            # >> email.message_from_string(raw string)
            # returns a Message object for enhanced object processing
            if "%s.email" % tablename in colnames:
                # WARNING: no encoding performed (raw message)
                item_dict["%s.email" % tablename] = raw_message

            # Size measure as suggested in a Velocity Reviews post
            # by Tim Williams: "how to get size of email attachment"
            # Note: len() and server RFC822.SIZE reports doesn't match
            # To retrieve the server size for representation would add a new
            # fetch transaction to the process
            for part in message.walk():
                maintype = part.get_content_maintype()
                if ("%s.attachments" % tablename in colnames) or \
                   ("%s.content" % tablename in colnames):
                    payload = part.get_payload(decode=True)
                    if payload:
                        filename = part.get_filename()
                        values = {"mime": part.get_content_type()}
                        if ((filename or not "text" in maintype) and
                            ("%s.attachments" % tablename in colnames)):
                            values.update({"payload": payload,
                                           "filename": filename,
                                           "encoding": part.get_content_charset(),
                                           "disposition": part["Content-Disposition"]})
                            attachments.append(values)
                        elif (("text" in maintype) and
                              ("%s.content" % tablename in colnames)):
                            values.update({"text": self.encode_text(payload,
                                               self.get_charset(part))})
                            content.append(values)

                if "%s.size" % tablename in colnames:
                    if part is not None:
                        size += len(str(part))
            item_dict["%s.content" % tablename] = content
            item_dict["%s.attachments" % tablename] = attachments
            item_dict["%s.size" % tablename] = size
            imapqry_list.append(item_dict)

        # extra object mapping for the sake of rows object
        # creation (sends an array or lists)
        for item_dict in imapqry_list:
            imapqry_array_item = list()
            for fieldname in colnames:
                imapqry_array_item.append(item_dict[fieldname])
            imapqry_array.append(imapqry_array_item)

        # parse result and return a rows object
        colnames = colnames
        processor = attributes.get('processor',self.parse)
        return processor(imapqry_array, fields, colnames)
6497
def _insert(self, table, fields):
    """Build the arguments for an IMAP APPEND from a DAL insert.

    Returns the tuple (mailbox, flags, date_time, message) that
    insert() passes straight to imaplib's connection.append().
    """
    def add_payload(message, obj):
        # Attach one content/attachment dict as a MIME part.
        # NOTE: Message here resolves to the name imported in the
        # enclosing scope below (inside "if not message:"); add_payload
        # is only ever called from that branch, so the import has
        # already run when this closure executes.
        payload = Message()
        charset = obj.get("encoding", "utf-8")
        payload.set_type(obj.get("mime", None))
        payload.set_charset(charset)
        if "text" in obj:
            payload.set_payload(obj["text"])
        elif "payload" in obj:
            payload.set_payload(obj["payload"])
        if "filename" in obj and obj["filename"]:
            payload.add_header("Content-Disposition",
                               "attachment", filename=obj["filename"])
        message.attach(payload)

    mailbox = table.mailbox
    # fields is a sequence of (Field, value) pairs.
    d = dict(((k.name, v) for k, v in fields))
    # APPEND wants a time tuple; default to "now" if no created value.
    date_time = (d.get("created", datetime.datetime.now())).timetuple()
    if len(d) > 0:
        message = d.get("email", None)
        attachments = d.get("attachments", [])
        content = d.get("content", [])
        # Translate boolean flag columns into IMAP flag keywords.
        flags = " ".join(["\\%s" % flag.capitalize() for flag in
                          ("answered", "deleted", "draft", "flagged",
                           "recent", "seen") if d.get(flag, False)])
        if not message:
            # No raw message supplied: build one from the field values.
            from email.message import Message
            mime = d.get("mime", None)
            charset = d.get("encoding", None)
            message = Message()
            message["from"] = d.get("sender", "")
            message["subject"] = d.get("subject", "")
            if mime:
                message.set_type(mime)
            if charset:
                message.set_charset(charset)
            for item in ("to", "cc", "bcc"):
                value = d.get(item, "")
                if isinstance(value, basestring):
                    message[item] = value
                else:
                    # presumably a list of addresses -- joined with ";"
                    message[item] = ";".join([i for i in
                                              value])
            if not message.is_multipart():
                if isinstance(content, basestring):
                    message.set_payload(content)
                elif len(content) > 0:
                    message.set_payload(content[0]["text"])
            else:
                [add_payload(message, c) for c in content]
                [add_payload(message, a) for a in attachments]
            message = message.as_string()
        return (mailbox, flags, date_time, message)
    else:
        raise NotImplementedError("IMAP empty insert is not implemented")
def insert(self, table, fields):
    """Append a new message to the table's mailbox.

    Returns the DAL row id of the freshly stored message, looked up
    through the UID reported by the server.
    """
    args = self._insert(table, fields)
    status, data = self.connection.append(*args)
    if status != "OK":
        raise Exception("IMAP message append failed: %s" % data)
    # The server reply carries the new message UID as its last number.
    uid = int(re.findall("\d+", str(data))[-1])
    return self.db(table.uid==uid).select(table.id).first().id
6562
def _update(self, tablename, query, fields, commit=False):
    """Compute the list of IMAP STORE commands for an update.

    Only flag columns can be updated; returns (number, op, flags)
    tuples consumed by update().  The commit parameter is accepted
    for interface compatibility and ignored here.
    """
    # TODO: the adapter should implement an .expand method
    commands = list()
    if use_common_filters(query):
        query = self.common_filter(query, [tablename,])
    mark = []
    unmark = []
    if query:
        for item in fields:
            field = item[0]
            name = field.name
            value = item[1]
            if self.is_flag(name):
                flag = self.search_fields[name]
                # \Recent is read-only on the server; skip it.
                if (value is not None) and (flag != "\\Recent"):
                    if value:
                        mark.append(flag)
                    else:
                        unmark.append(flag)
        # Select the mailbox, then search for the matching messages.
        result, data = self.connection.select(
            self.connection.mailbox_names[tablename])
        string_query = "(%s)" % query
        result, data = self.connection.search(None, string_query)
        # data[0] is a whitespace-separated list of message numbers.
        store_list = [item.strip() for item in data[0].split()
                      if item.strip().isdigit()]
        # build commands for marked flags
        for number in store_list:
            result = None
            if len(mark) > 0:
                commands.append((number, "+FLAGS", "(%s)" % " ".join(mark)))
            if len(unmark) > 0:
                commands.append((number, "-FLAGS", "(%s)" % " ".join(unmark)))
    return commands
6596
def update(self, tablename, query, fields):
    """Apply the flag changes computed by _update via IMAP STORE.

    Returns the number of successfully executed store commands;
    raises on the first server error.
    """
    applied = 0
    for command in self._update(tablename, query, fields):
        status, data = self.connection.store(*command)
        if status != "OK":
            raise Exception("IMAP storing error: %s" % data)
        applied += 1
    return applied
6607
def _count(self, query, distinct=None):
    # Building a count expression without executing it has no meaning
    # for IMAP; callers must use count() which talks to the server.
    raise NotImplementedError()
6610
def count(self, query, distinct=None):
    """Return how many messages in the query's mailbox match *query*.

    distinct is accepted for interface compatibility and ignored.
    """
    mailbox = self.get_query_mailbox(query)
    if not query or mailbox is None:
        return 0
    if use_common_filters(query):
        query = self.common_filter(query, [mailbox,])
    # Select the mailbox, then issue the search criteria string.
    result, data = self.connection.select(self.connection.mailbox_names[mailbox])
    result, data = self.connection.search(None, "(%s)" % query)
    # The server answers with a whitespace-separated list of message numbers.
    matches = [token.strip() for token in data[0].split() if token.strip().isdigit()]
    return len(matches)
6623
def delete(self, tablename, query):
    """Flag matching messages \\Deleted and expunge the mailbox.

    Returns the number of messages flagged; raises on store errors.
    """
    removed = 0
    if query:
        if use_common_filters(query):
            query = self.common_filter(query, [tablename,])
        result, data = self.connection.select(self.connection.mailbox_names[tablename])
        result, data = self.connection.search(None, "(%s)" % query)
        numbers = [token.strip() for token in data[0].split() if token.strip().isdigit()]
        for number in numbers:
            result, data = self.connection.store(number, "+FLAGS", "(\\Deleted)")
            if result != "OK":
                raise Exception("IMAP store error: %s" % data)
            removed += 1
        if removed > 0:
            # Only expunge when something was actually flagged.
            result, data = self.connection.expunge()
    return removed
6642
def BELONGS(self, first, second):
    """Build an IMAP sequence-set for an IN query.

    Only the message sequence number (id) and UID fields support
    set membership; non-numeric values are silently dropped.
    """
    key = self.search_fields[first.name]
    if key not in ("MESSAGE", "UID"):
        raise Exception("Operation not supported")
    digits = ",".join(str(v) for v in second if str(v).isdigit()).strip()
    if key == "UID":
        return "UID %s" % digits
    return "%s" % digits
    # result = "(%s %s)" % (self.expand(first), self.expand(second))
6658
def CONTAINS(self, first, second, case_sensitive=False):
    """Substring search criterion.

    IMAP text search is case-insensitive; case_sensitive is accepted
    for interface compatibility and silently ignored.
    """
    key = self.search_fields[first.name]
    if key in ("FROM", "TO", "SUBJECT", "TEXT"):
        return "%s \"%s\"" % (key, self.expand(second))
    if first.name in ("cc", "bcc"):
        return "%s \"%s\"" % (first.name.upper(), self.expand(second))
    if first.name == "mime":
        # The MIME type lives in the Content-Type header.
        return "HEADER Content-Type \"%s\"" % self.expand(second)
    raise Exception("Operation not supported")
6674
def GT(self, first, second):
    """Strictly-greater criterion for sequence, UID, DATE or SIZE fields."""
    name = self.search_fields[first.name]
    if name == "MESSAGE":
        last_message = self.get_last_message(first.tablename)
        return "%d:%d" % (int(self.expand(second)) + 1, last_message)
    if name == "UID":
        # GT and LT may not return expected sets depending on
        # the uid format implemented by the server.
        try:
            pedestal, threshold = self.get_uid_bounds(first.tablename)
        except TypeError:
            e = sys.exc_info()[1]
            LOGGER.debug("Error requesting uid bounds: %s", str(e))
            return ""
        try:
            lower_limit = int(self.expand(second)) + 1
        except (ValueError, TypeError):
            e = sys.exc_info()[1]
            raise Exception("Operation not supported (non integer UID)")
        return "UID %s:%s" % (lower_limit, threshold)
    if name == "DATE":
        # "greater than date d" == SINCE the day after d.
        return "SINCE %s" % self.convert_date(second, add=datetime.timedelta(1))
    if name == "SIZE":
        return "LARGER %s" % self.expand(second)
    raise Exception("Operation not supported")
6704
def GE(self, first, second):
    """Greater-or-equal criterion for sequence, UID or DATE fields."""
    name = self.search_fields[first.name]
    if name == "MESSAGE":
        last_message = self.get_last_message(first.tablename)
        return "%s:%s" % (self.expand(second), last_message)
    if name == "UID":
        # GT and LT may not return expected sets depending on
        # the uid format implemented by the server.
        try:
            pedestal, threshold = self.get_uid_bounds(first.tablename)
        except TypeError:
            e = sys.exc_info()[1]
            LOGGER.debug("Error requesting uid bounds: %s", str(e))
            return ""
        lower_limit = self.expand(second)
        return "UID %s:%s" % (lower_limit, threshold)
    if name == "DATE":
        return "SINCE %s" % self.convert_date(second)
    raise Exception("Operation not supported")
6728
def LT(self, first, second):
    """Strictly-less criterion for sequence, UID, DATE or SIZE fields."""
    name = self.search_fields[first.name]
    if name == "MESSAGE":
        return "%s:%s" % (1, int(self.expand(second)) - 1)
    if name == "UID":
        try:
            pedestal, threshold = self.get_uid_bounds(first.tablename)
        except TypeError:
            e = sys.exc_info()[1]
            LOGGER.debug("Error requesting uid bounds: %s", str(e))
            return ""
        try:
            upper_limit = int(self.expand(second)) - 1
        except (ValueError, TypeError):
            e = sys.exc_info()[1]
            raise Exception("Operation not supported (non integer UID)")
        return "UID %s:%s" % (pedestal, upper_limit)
    if name == "DATE":
        return "BEFORE %s" % self.convert_date(second)
    if name == "SIZE":
        return "SMALLER %s" % self.expand(second)
    raise Exception("Operation not supported")
6754
def LE(self, first, second):
    """Less-or-equal criterion for sequence, UID or DATE fields."""
    name = self.search_fields[first.name]
    if name == "MESSAGE":
        return "%s:%s" % (1, self.expand(second))
    if name == "UID":
        try:
            pedestal, threshold = self.get_uid_bounds(first.tablename)
        except TypeError:
            e = sys.exc_info()[1]
            LOGGER.debug("Error requesting uid bounds: %s", str(e))
            return ""
        upper_limit = int(self.expand(second))
        return "UID %s:%s" % (pedestal, upper_limit)
    if name == "DATE":
        # "on or before d" == BEFORE the day after d.
        return "BEFORE %s" % self.convert_date(second, add=datetime.timedelta(1))
    raise Exception("Operation not supported")
6774
def NE(self, first, second=None):
    """Inequality criterion; field != None on an 'id' field selects all."""
    if (second is None) and isinstance(first, Field):
        # All records special table query
        if first.type == "id":
            return self.GE(first, 1)
    negated = self.NOT(self.EQ(first, second))
    # EQ may itself emit a NOT (flag == False); collapse double negation.
    negated = negated.replace("NOT NOT", "").strip()
    return negated
6783
def EQ(self, first, second):
    """Equality criterion: sequence number, UID, date, or flag state."""
    name = self.search_fields[first.name]
    if name is None:
        raise Exception("Operation not supported")
    if name == "MESSAGE":
        # query by message sequence number
        return "%s" % self.expand(second)
    if name == "UID":
        return "UID %s" % self.expand(second)
    if name == "DATE":
        return "ON %s" % self.convert_date(second)
    if name in self.flags.values():
        # Flag fields: "\Seen" -> "SEEN" when true, "NOT SEEN" when false.
        keyword = name.upper()[1:]
        if second:
            return "%s" % keyword
        return "NOT %s" % keyword
    raise Exception("Operation not supported")
6806
def AND(self, first, second):
    """IMAP ANDs criteria by juxtaposition: join both expansions with a space."""
    left = self.expand(first)
    right = self.expand(second)
    return "%s %s" % (left, right)
6810
def OR(self, first, second):
    """Prefix OR operator; collapse a doubled OR produced by nesting."""
    clause = "OR %s %s" % (self.expand(first), self.expand(second))
    return "%s" % clause.replace("OR OR", "OR")
6814
def NOT(self, first):
    """Negate a criterion with the IMAP NOT prefix."""
    return "NOT %s" % self.expand(first)
6818 6819 ######################################################################## 6820 # end of adapters 6821 ######################################################################## 6822 6823 ADAPTERS = { 6824 'sqlite': SQLiteAdapter, 6825 'spatialite': SpatiaLiteAdapter, 6826 'sqlite:memory': SQLiteAdapter, 6827 'spatialite:memory': SpatiaLiteAdapter, 6828 'mysql': MySQLAdapter, 6829 'postgres': PostgreSQLAdapter, 6830 'postgres:psycopg2': PostgreSQLAdapter, 6831 'postgres:pg8000': PostgreSQLAdapter, 6832 'postgres2:psycopg2': NewPostgreSQLAdapter, 6833 'postgres2:pg8000': NewPostgreSQLAdapter, 6834 'oracle': OracleAdapter, 6835 'mssql': MSSQLAdapter, 6836 'mssql2': MSSQL2Adapter, 6837 'mssql3': MSSQL3Adapter, 6838 'vertica': VerticaAdapter, 6839 'sybase': SybaseAdapter, 6840 'db2': DB2Adapter, 6841 'teradata': TeradataAdapter, 6842 'informix': InformixAdapter, 6843 'informix-se': InformixSEAdapter, 6844 'firebird': FireBirdAdapter, 6845 'firebird_embedded': FireBirdAdapter, 6846 'ingres': IngresAdapter, 6847 'ingresu': IngresUnicodeAdapter, 6848 'sapdb': SAPDBAdapter, 6849 'cubrid': CubridAdapter, 6850 'jdbc:sqlite': JDBCSQLiteAdapter, 6851 'jdbc:sqlite:memory': JDBCSQLiteAdapter, 6852 'jdbc:postgres': JDBCPostgreSQLAdapter, 6853 'gae': GoogleDatastoreAdapter, # discouraged, for backward compatibility 6854 'google:datastore': GoogleDatastoreAdapter, 6855 'google:sql': GoogleSQLAdapter, 6856 'couchdb': CouchDBAdapter, 6857 'mongodb': MongoDBAdapter, 6858 'imap': IMAPAdapter 6859 }
def sqlhtml_validators(field):
    """
    Field type validation, using web2py's validators mechanism.

    makes sure the content of a field is in line with the declared
    fieldtype
    """
    db = field.db
    try:
        from gluon import validators
    except ImportError:
        # Outside web2py there are no validators to attach.
        return []
    field_type, field_length = field.type, field.length
    if isinstance(field_type, SQLCustomType):
        if hasattr(field_type, 'validator'):
            return field_type.validator
        else:
            # Fall back to validating as the underlying base type.
            field_type = field_type.type
    elif not isinstance(field_type,str):
        return []
    requires=[]
    def ff(r,id):
        # Represent a referenced record via the table's _format, or
        # just echo the id back when no format applies.
        row=r(id)
        if not row:
            return id
        elif hasattr(r, '_format') and isinstance(r._format,str):
            return r._format % row
        elif hasattr(r, '_format') and callable(r._format):
            return r._format(row)
        else:
            return id
    if field_type in (('string', 'text', 'password')):
        requires.append(validators.IS_LENGTH(field_length))
    elif field_type == 'json':
        requires.append(validators.IS_EMPTY_OR(validators.IS_JSON(native_json=field.db._adapter.native_json)))
    elif field_type == 'double' or field_type == 'float':
        requires.append(validators.IS_FLOAT_IN_RANGE(-1e100, 1e100))
    elif field_type in ('integer','bigint'):
        # NOTE(review): float bounds used for an int range -- mirrors the
        # float case above; effectively "any integer".
        requires.append(validators.IS_INT_IN_RANGE(-1e100, 1e100))
    elif field_type.startswith('decimal'):
        requires.append(validators.IS_DECIMAL_IN_RANGE(-10**10, 10**10))
    elif field_type == 'date':
        requires.append(validators.IS_DATE())
    elif field_type == 'time':
        requires.append(validators.IS_TIME())
    elif field_type == 'datetime':
        requires.append(validators.IS_DATETIME())
    elif db and field_type.startswith('reference') and \
            field_type.find('.') < 0 and \
            field_type[10:] in db.tables:
        # "reference <table>": validate against the referenced table.
        referenced = db[field_type[10:]]
        def repr_ref(id, row=None, r=referenced, f=ff): return f(r, id)
        field.represent = field.represent or repr_ref
        if hasattr(referenced, '_format') and referenced._format:
            requires = validators.IS_IN_DB(db,referenced._id,
                                           referenced._format)
            if field.unique:
                requires._and = validators.IS_NOT_IN_DB(db,field)
            if field.tablename == field_type[10:]:
                # Self-reference must be allowed to be empty.
                return validators.IS_EMPTY_OR(requires)
            return requires
    elif db and field_type.startswith('list:reference') and \
            field_type.find('.') < 0 and \
            field_type[15:] in db.tables:
        # "list:reference <table>": multi-valued reference.
        referenced = db[field_type[15:]]
        def list_ref_repr(ids, row=None, r=referenced, f=ff):
            if not ids:
                return None
            refs = None
            db, id = r._db, r._id
            if isinstance(db._adapter, GoogleDatastoreAdapter):
                # GAE limits belongs() to 30 values per query; batch and
                # merge the partial result sets.
                def count(values): return db(id.belongs(values)).select(id)
                rx = range(0, len(ids), 30)
                refs = reduce(lambda a,b:a&b, [count(ids[i:i+30]) for i in rx])
            else:
                refs = db(id.belongs(ids)).select(id)
            return (refs and ', '.join(f(r,x.id) for x in refs) or '')
        field.represent = field.represent or list_ref_repr
        if hasattr(referenced, '_format') and referenced._format:
            requires = validators.IS_IN_DB(db,referenced._id,
                                           referenced._format,multiple=True)
        else:
            requires = validators.IS_IN_DB(db,referenced._id,
                                           multiple=True)
        if field.unique:
            requires._and = validators.IS_NOT_IN_DB(db,field)
        if not field.notnull:
            requires = validators.IS_EMPTY_OR(requires)
        return requires
    elif field_type.startswith('list:'):
        def repr_list(values,row=None): return', '.join(str(v) for v in (values or []))
        field.represent = field.represent or repr_list
    if field.unique:
        requires.insert(0,validators.IS_NOT_IN_DB(db,field))
    # Two-letter prefixes of types that may legitimately be empty:
    # integer, double, date, time, decimal/datetime, boolean/bigint.
    sff = ['in', 'do', 'da', 'ti', 'de', 'bo']
    if field.notnull and not field_type[:2] in sff:
        requires.insert(0, validators.IS_NOT_EMPTY())
    elif not field.notnull and field_type[:2] in sff and requires:
        requires[-1] = validators.IS_EMPTY_OR(requires[-1])
    return requires
def bar_escape(item):
    """Double any '|' in item's string form so bar_encode can delimit safely."""
    text = str(item)
    return text.replace('|', '||')
6965
def bar_encode(items):
    """Serialize items as '|v1|v2|', skipping blank values; '|' in values is doubled."""
    parts = [bar_escape(item) for item in items if str(item).strip()]
    return '|%s|' % '|'.join(parts)
6968
def bar_decode_integer(value):
    """Parse a bar-encoded string (or file-like object) into a list of longs."""
    if not hasattr(value,'split') and hasattr(value,'read'):
        # Blob columns may come back as file-like objects.
        value = value.read()
    chunks = value.split('|')
    return [long(chunk) for chunk in chunks if chunk.strip()]
6973
def bar_decode_string(value):
    """Split a bar-encoded string back into values, undoing '||' escapes."""
    inner = value[1:-1]
    return [chunk.replace('||', '|') for chunk in
            REGEX_UNPACK.split(inner) if chunk.strip()]
6977
class Row(object):

    """
    a dictionary that lets you do d['a'] as well as d.a
    this is only used to store a Row
    """

    # All data lives directly in __dict__, so item access and attribute
    # access share one namespace.
    __init__ = lambda self,*args,**kwargs: self.__dict__.update(*args,**kwargs)

    def __getitem__(self, k):
        # Lookup order: the _extra dict (extra select expressions), then
        # "table.field" dotted keys, then a plain attribute, and finally
        # the lazy reference resolver when the instance provides one.
        key=str(k)
        _extra = self.__dict__.get('_extra', None)
        if _extra is not None:
            v = _extra.get(key, DEFAULT)
            if v != DEFAULT:
                return v
        m = REGEX_TABLE_DOT_FIELD.match(key)
        if m:
            try:
                return ogetattr(self, m.group(1))[m.group(2)]
            except (KeyError,AttributeError,TypeError):
                # fall back to the bare field name
                key = m.group(2)
        try:
            return ogetattr(self, key)
        except (KeyError,AttributeError,TypeError), ae:
            try:
                # presumably installed by the parser for lazy joined
                # references -- TODO confirm against the parse() code
                self[key] = ogetattr(self,'__get_lazy_reference__')(key)
                return self[key]
            except:
                raise ae

    __setitem__ = lambda self, key, value: setattr(self, str(key), value)

    __delitem__ = object.__delattr__

    # NOTE(review): shadowed by the def __copy__ further down.
    __copy__ = lambda self: Row(self)

    __call__ = __getitem__


    def get(self, key, default=None):
        try:
            return self.__getitem__(key)
        except(KeyError, AttributeError, TypeError):
            return self.__dict__.get(key,default)

    has_key = __contains__ = lambda self, key: key in self.__dict__

    __nonzero__ = lambda self: len(self.__dict__)>0

    update = lambda self, *args, **kwargs:  self.__dict__.update(*args, **kwargs)

    keys = lambda self: self.__dict__.keys()

    items = lambda self: self.__dict__.items()

    values = lambda self: self.__dict__.values()

    __iter__ = lambda self: self.__dict__.__iter__()

    iteritems = lambda self: self.__dict__.iteritems()

    __str__ = __repr__ = lambda self: '<Row %s>' % self.as_dict()

    # int()/long() of a Row yields its id column.
    __int__ = lambda self: object.__getattribute__(self,'id')

    __long__ = lambda self: long(object.__getattribute__(self,'id'))

    __getattr__ = __getitem__

    # def __getattribute__(self, key):
    #     try:
    #         return object.__getattribute__(self, key)
    #     except AttributeError, ae:
    #         try:
    #             return self.__get_lazy_reference__(key)
    #         except:
    #             raise ae

    def __eq__(self,other):
        # Rows compare equal when their dict forms match; anything
        # without an as_dict() compares unequal.
        try:
            return self.as_dict() == other.as_dict()
        except AttributeError:
            return False

    def __ne__(self,other):
        return not (self == other)

    def __copy__(self):
        return Row(dict(self))

    def as_dict(self, datetime_to_str=False, custom_types=None):
        """Return a plain dict copy, flattening nested Rows and
        dropping values that are not JSON/serializer friendly."""
        SERIALIZABLE_TYPES = [str, unicode, int, long, float, bool, list, dict]
        if isinstance(custom_types,(list,tuple,set)):
            SERIALIZABLE_TYPES += custom_types
        elif custom_types:
            SERIALIZABLE_TYPES.append(custom_types)
        d = dict(self)
        for k in copy.copy(d.keys()):
            v=d[k]
            if d[k] is None:
                continue
            elif isinstance(v,Row):
                d[k]=v.as_dict()
            elif isinstance(v,Reference):
                d[k]=long(v)
            elif isinstance(v,decimal.Decimal):
                d[k]=float(v)
            elif isinstance(v, (datetime.date, datetime.datetime, datetime.time)):
                if datetime_to_str:
                    # ISO format with a space, truncated to seconds.
                    d[k] = v.isoformat().replace('T',' ')[:19]
            elif not isinstance(v,tuple(SERIALIZABLE_TYPES)):
                del d[k]
        return d

    def as_xml(self, row_name="row", colnames=None, indent='  '):
        """Serialize the row (recursively) as an XML fragment;
        non-identifier keys are emitted as <extra name="..."> nodes."""
        def f(row,field,indent='  '):
            if isinstance(row,Row):
                spc = indent+'  \n'
                items = [f(row[x],x,indent+'  ') for x in row]
                return '%s<%s>\n%s\n%s</%s>' % (
                    indent,
                    field,
                    spc.join(item for item in items if item),
                    indent,
                    field)
            elif not callable(row):
                if REGEX_ALPHANUMERIC.match(field):
                    return '%s<%s>%s</%s>' % (indent,field,row,field)
                else:
                    return '%s<extra name="%s">%s</extra>' % \
                        (indent,field,row)
            else:
                # Callables (e.g. lazy methods) are skipped.
                return None
        return f(self, row_name, indent=indent)

    def as_json(self, mode="object", default=None, colnames=None,
                serialize=True, **kwargs):
        """
        serializes the row to a JSON object
        kwargs are passed to .as_dict method
        only "object" mode supported

        serialize = False used by Rows.as_json
        TODO: return array mode with query column order

        mode and colnames are not implemented
        """

        item = self.as_dict(**kwargs)
        if serialize:
            if have_serializers:
                return serializers.json(item,
                                        default=default or
                                        serializers.custom_json)
            elif simplejson:
                return simplejson.dumps(item)
            else:
                raise RuntimeError("missing simplejson")
        else:
            return item
7141 7142 ################################################################################ 7143 # Everything below should be independent of the specifics of the database 7144 # and should work for RDBMs and some NoSQL databases 7145 ################################################################################ 7146 7147 -class SQLCallableList(list):
7148 - def __call__(self):
7149 return copy.copy(self)
7150
def smart_query(fields,text):
    """Parse a natural-language-ish search string into a DAL query
    over the given fields (or tables, which contribute all fields)."""
    if not isinstance(fields,(list,tuple)):
        fields = [fields]
    new_fields = []
    for field in fields:
        if isinstance(field,Field):
            new_fields.append(field)
        elif isinstance(field,Table):
            for ofield in field:
                new_fields.append(ofield)
        else:
            raise RuntimeError("fields must be a list of fields")
    fields = new_fields
    # Map both "name" and "table.name" (lowercased) to each field;
    # first field wins on name collisions.
    field_map = {}
    for field in fields:
        n = field.name.lower()
        if not n in field_map:
            field_map[n] = field
        n = str(field).lower()
        if not n in field_map:
            field_map[n] = field
    # Pull quoted string constants out of the text and replace them with
    # #<index> placeholders so the operator rewriting below cannot
    # touch their contents.
    constants = {}
    i = 0
    while True:
        m = REGEX_CONST_STRING.search(text)
        if not m: break
        text = text[:m.start()]+('#%i' % i)+text[m.end():]
        constants[str(i)] = m.group()[1:-1]
        i+=1
    text = re.sub('\s+',' ',text).lower()
    # Normalize every supported operator spelling to a canonical token.
    for a,b in [('&','and'),
                ('|','or'),
                ('~','not'),
                ('==','='),
                ('<','<'),
                ('>','>'),
                ('<=','<='),
                ('>=','>='),
                ('<>','!='),
                ('=<','<='),
                ('=>','>='),
                ('=','='),
                (' less or equal than ','<='),
                (' greater or equal than ','>='),
                (' equal or less than ','<='),
                (' equal or greater than ','>='),
                (' less or equal ','<='),
                (' greater or equal ','>='),
                (' equal or less ','<='),
                (' equal or greater ','>='),
                (' not equal to ','!='),
                (' not equal ','!='),
                (' equal to ','='),
                (' equal ','='),
                (' equals ','='),
                (' less than ','<'),
                (' greater than ','>'),
                (' starts with ','startswith'),
                (' ends with ','endswith'),
                (' not in ' , 'notbelongs'),
                (' in ' , 'belongs'),
                (' is ','=')]:
        if a[0]==' ':
            # also accept "is <op>" phrasing, e.g. "is less than"
            text = text.replace(' is'+a,' %s ' % b)
        text = text.replace(a,' %s ' % b)
    text = re.sub('\s+',' ',text).lower()
    # Glue split two-character comparators back together ("< =" -> "<=").
    text = re.sub('(?P<a>[\<\>\!\=])\s+(?P<b>[\<\>\!\=])','\g<a>\g<b>',text)
    # Token scan: expects repeating [not] <field> <op> <value> groups
    # joined by and/or.
    query = field = neg = op = logic = None
    for item in text.split():
        if field is None:
            if item == 'not':
                neg = True
            elif not neg and not logic and item in ('and','or'):
                logic = item
            elif item in field_map:
                field = field_map[item]
            else:
                raise RuntimeError("Invalid syntax")
        elif not field is None and op is None:
            op = item
        elif not op is None:
            if item.startswith('#'):
                # placeholder -> original quoted constant
                if not item[1:] in constants:
                    raise RuntimeError("Invalid syntax")
                value = constants[item[1:]]
            else:
                value = item
                if field.type in ('text', 'string', 'json'):
                    # bare '=' on a text field behaves as a LIKE match
                    if op == '=': op = 'like'
            if op == '=': new_query = field==value
            elif op == '<': new_query = field<value
            elif op == '>': new_query = field>value
            elif op == '<=': new_query = field<=value
            elif op == '>=': new_query = field>=value
            elif op == '!=': new_query = field!=value
            elif op == 'belongs': new_query = field.belongs(value.split(','))
            elif op == 'notbelongs': new_query = ~field.belongs(value.split(','))
            elif field.type in ('text', 'string', 'json'):
                if op == 'contains': new_query = field.contains(value)
                elif op == 'like': new_query = field.like(value)
                elif op == 'startswith': new_query = field.startswith(value)
                elif op == 'endswith': new_query = field.endswith(value)
                else: raise RuntimeError("Invalid operation")
            elif field._db._adapter.dbengine=='google:datastore' and \
                 field.type in ('list:integer', 'list:string', 'list:reference'):
                if op == 'contains': new_query = field.contains(value)
                else: raise RuntimeError("Invalid operation")
            else: raise RuntimeError("Invalid operation")
            if neg: new_query = ~new_query
            # Combine with the running query per the pending and/or.
            if query is None:
                query = new_query
            elif logic == 'and':
                query &= new_query
            elif logic == 'or':
                query |= new_query
            field = op = neg = logic = None
    return query
7268
7269 -class DAL(object):
7270 7271 """ 7272 an instance of this class represents a database connection 7273 7274 Example:: 7275 7276 db = DAL('sqlite://test.db') 7277 7278 or 7279 7280 db = DAL(**{"uri": ..., "tables": [...]...}) # experimental 7281 7282 db.define_table('tablename', Field('fieldname1'), 7283 Field('fieldname2')) 7284 """ 7285
def __new__(cls, uri='sqlite://dummy.db', *args, **kwargs):
    """Per-thread DAL instance bookkeeping.

    Instances are grouped per db_uid (an md5 of the uri unless given)
    in THREAD_LOCAL; '<zombie>' uris reattach to an existing instance
    (or park a placeholder) instead of creating a new connection.
    """
    if not hasattr(THREAD_LOCAL,'db_instances'):
        THREAD_LOCAL.db_instances = {}
    if not hasattr(THREAD_LOCAL,'db_instances_zombie'):
        THREAD_LOCAL.db_instances_zombie = {}
    if uri == '<zombie>':
        db_uid = kwargs['db_uid'] # a zombie must have a db_uid!
        if db_uid in THREAD_LOCAL.db_instances:
            db_group = THREAD_LOCAL.db_instances[db_uid]
            # reuse the most recently registered instance
            db = db_group[-1]
        elif db_uid in THREAD_LOCAL.db_instances_zombie:
            db = THREAD_LOCAL.db_instances_zombie[db_uid]
        else:
            db = super(DAL, cls).__new__(cls)
            THREAD_LOCAL.db_instances_zombie[db_uid] = db
    else:
        db_uid = kwargs.get('db_uid',hashlib_md5(repr(uri)).hexdigest())
        if db_uid in THREAD_LOCAL.db_instances_zombie:
            # a zombie placeholder exists for this uid: adopt it
            db = THREAD_LOCAL.db_instances_zombie[db_uid]
            del THREAD_LOCAL.db_instances_zombie[db_uid]
        else:
            db = super(DAL, cls).__new__(cls)
        db_group = THREAD_LOCAL.db_instances.get(db_uid,[])
        db_group.append(db)
        THREAD_LOCAL.db_instances[db_uid] = db_group
    db._db_uid = db_uid
    return db
@staticmethod
def set_folder(folder):
    """
    # ## this allows gluon to set a folder for this thread
    # ## <<<<<<<<< Should go away as new DAL replaces old sql.py
    """
    # Delegates to the adapter base class, which stores the folder
    # used for .table migration files.
    BaseAdapter.set_folder(folder)
@staticmethod
def get_instances():
    """
    Returns a dictionary with uri as key with timings and defined tables
    {'sqlite://storage.sqlite': {
        'dbstats': [(select auth_user.email from auth_user, 0.02009)],
        'dbtables': {
            'defined': ['auth_cas', 'auth_event', 'auth_group',
                'auth_membership', 'auth_permission', 'auth_user'],
            'lazy': '[]'
            }
        }
    }
    """
    dbs = getattr(THREAD_LOCAL,'db_instances',{}).items()
    infos = {}
    for db_uid, db_group in dbs:
        for db in db_group:
            if not db._uri:
                continue
            # Credentials are stripped before the uri is used as a key.
            k = hide_password(db._uri)
            infos[k] = dict(dbstats = [(row[0], row[1]) for row in db._timings],
                            dbtables = {'defined':
                                        sorted(list(set(db.tables) -
                                                    set(db._LAZY_TABLES.keys()))),
                                        'lazy': sorted(db._LAZY_TABLES.keys())}
                            )
    return infos
@staticmethod
def distributed_transaction_begin(*instances):
    """Begin a two-phase distributed transaction across several DAL
    instances.

    Each instance gets a unique key "<host>.<thread>.<index>"; raises
    SyntaxError if any adapter lacks distributed transaction support.
    """
    if not instances:
        return
    thread_key = '%s.%s' % (socket.gethostname(), threading.currentThread())
    # BUGFIX: enumerate into a list *before* building keys -- the old
    # code unpacked (i, db) from the raw instances tuple (TypeError)
    # and then iterated a one-shot enumerate generator.
    instances = list(enumerate(instances))
    keys = ['%s.%i' % (thread_key, i) for (i, db) in instances]
    for (i, db) in instances:
        if not db._adapter.support_distributed_transaction():
            raise SyntaxError(
                'distributed transaction not suported by %s' % db._dbname)
    for (i, db) in instances:
        db._adapter.distributed_transaction_begin(keys[i])
@staticmethod
def distributed_transaction_commit(*instances):
    """Two-phase commit across several DAL instances.

    Prepares every instance first; on any failure rolls back all
    prepared transactions and raises RuntimeError, otherwise commits
    them all.
    """
    if not instances:
        return
    # BUGFIX: materialize the enumeration -- the old code exhausted the
    # enumerate generator while building keys, so every following loop
    # (prepare/commit/rollback) silently did nothing.
    instances = list(enumerate(instances))
    thread_key = '%s.%s' % (socket.gethostname(), threading.currentThread())
    keys = ['%s.%i' % (thread_key, i) for (i, db) in instances]
    for (i, db) in instances:
        if not db._adapter.support_distributed_transaction():
            # BUGFIX: was db._dbanme (AttributeError on this path).
            raise SyntaxError(
                'distributed transaction not suported by %s' % db._dbname)
    try:
        for (i, db) in instances:
            db._adapter.prepare(keys[i])
    except:
        for (i, db) in instances:
            db._adapter.rollback_prepared(keys[i])
        raise RuntimeError('failure to commit distributed transaction')
    else:
        for (i, db) in instances:
            db._adapter.commit_prepared(keys[i])
    return
7387
    def __init__(self, uri=DEFAULT_URI,
                 pool_size=0, folder=None,
                 db_codec='UTF-8', check_reserved=None,
                 migrate=True, fake_migrate=False,
                 migrate_enabled=True, fake_migrate_all=False,
                 decode_credentials=False, driver_args=None,
                 adapter_args=None, attempts=5, auto_import=False,
                 bigint_id=False, debug=False, lazy_tables=False,
                 db_uid=None, do_connect=True,
                 after_connection=None, tables=None):
        """
        Creates a new Database Abstraction Layer instance.

        Keyword arguments:

        :uri: string that contains information for connecting to a database.
            (default: 'sqlite://dummy.db')

            experimental: you can specify a dictionary as uri
            parameter i.e. with
            db = DAL({"uri": "sqlite://storage.sqlite",
                      "tables": {...}, ...})

            for an example of dict input you can check the output
            of the scaffolding db model with

            db.as_dict()

            Note that for compatibility with Python older than
            version 2.6.5 you should cast your dict input keys
            to str due to a syntax limitation on kwarg names.
            for proper DAL dictionary input you can use one of:

            obj = serializers.cast_keys(dict, [encoding="utf-8"])

            or else (for parsing json input)

            obj = serializers.loads_json(data, unicode_keys=False)

        :pool_size: How many open connections to make to the database object.
        :folder: where .table files will be created.
            automatically set within web2py
            use an explicit path when using DAL outside web2py
        :db_codec: string encoding of the database (default: 'UTF-8')
        :check_reserved: list of adapters to check tablenames and column names
            against sql/nosql reserved keywords. (Default None)

            * 'common' List of sql keywords that are common to all database
              types such as "SELECT, INSERT". (recommended)
            * 'all' Checks against all known SQL keywords. (not recommended)
            * '<adaptername>' Checks against the specific adapters list of
              keywords (recommended)
            * '<adaptername>_nonreserved' Checks against the specific adapters
              list of nonreserved keywords. (if available)
        :migrate (defaults to True) sets default migrate behavior for all tables
        :fake_migrate (defaults to False) sets default fake_migrate behavior
            for all tables
        :migrate_enabled (defaults to True). If set to False disables ALL
            migrations
        :fake_migrate_all (defaults to False). If sets to True fake migrates
            ALL tables
        :attempts (defaults to 5). Number of times to attempt connecting
        :auto_import (defaults to False). If set, import automatically table
            definitions from the databases folder
        :bigint_id (defaults to False): If set, turn on bigint instead of int
            for id fields
        :lazy_tables (defaults to False): delay table definition until table
            access
        :after_connection (defaults to None): a callable that will be executed
            after the connection
        """
        # '<zombie>' instances are produced by unpickling: they carry only a
        # db_uid and must not open a real connection.
        if uri == '<zombie>' and db_uid is not None: return
        if not decode_credentials:
            credential_decoder = lambda cred: cred
        else:
            credential_decoder = lambda cred: urllib.unquote(cred)
        self._folder = folder
        if folder:
            self.set_folder(folder)
        self._uri = uri
        self._pool_size = pool_size
        self._db_codec = db_codec
        self._lastsql = ''
        self._timings = []
        self._pending_references = {}
        self._request_tenant = 'request_tenant'
        self._common_fields = []
        self._referee_name = '%(table)s'
        self._bigint_id = bigint_id
        self._debug = debug
        self._migrated = []
        self._LAZY_TABLES = {}
        self._lazy_tables = lazy_tables
        self._tables = SQLCallableList()
        self._driver_args = driver_args
        self._adapter_args = adapter_args
        self._check_reserved = check_reserved
        self._decode_credentials = decode_credentials
        # NOTE(review): self._attempts keeps the raw caller value; only the
        # local `attempts` is sanitized below -- confirm intended.
        self._attempts = attempts
        self._do_connect = do_connect

        if not str(attempts).isdigit() or attempts < 0:
            attempts = 5
        if uri:
            # a tuple/list of uris means: try each in turn per attempt
            uris = isinstance(uri,(list,tuple)) and uri or [uri]
            error = ''
            connected = False
            for k in range(attempts):
                for uri in uris:
                    try:
                        if is_jdbc and not uri.startswith('jdbc:'):
                            uri = 'jdbc:'+uri
                        self._dbname = REGEX_DBNAME.match(uri).group()
                        if not self._dbname in ADAPTERS:
                            raise SyntaxError("Error in URI '%s' or database not supported" % self._dbname)
                        # notice that driver args or {} else driver_args
                        # defaults to {} global, not correct
                        kwargs = dict(db=self,uri=uri,
                                      pool_size=pool_size,
                                      folder=folder,
                                      db_codec=db_codec,
                                      credential_decoder=credential_decoder,
                                      driver_args=driver_args or {},
                                      adapter_args=adapter_args or {},
                                      do_connect=do_connect,
                                      after_connection=after_connection)
                        self._adapter = ADAPTERS[self._dbname](**kwargs)
                        types = ADAPTERS[self._dbname].types
                        # copy so multiple DAL() possible
                        self._adapter.types = copy.copy(types)
                        self._adapter.build_parsemap()
                        if bigint_id:
                            if 'big-id' in types and 'reference' in types:
                                self._adapter.types['id'] = types['big-id']
                                self._adapter.types['reference'] = types['big-reference']
                        connected = True
                        break
                    except SyntaxError:
                        # bad uri / unknown adapter: retrying cannot help
                        raise
                    except Exception:
                        tb = traceback.format_exc()
                        sys.stderr.write('DEBUG: connect attempt %i, connection error:\n%s' % (k, tb))
                if connected:
                    break
                else:
                    time.sleep(1)
            if not connected:
                # NOTE(review): `tb` is unbound if every failure happened
                # before the except-Exception branch ran -- verify.
                raise RuntimeError("Failure to connect, tried %d times:\n%s" % (attempts, tb))
        else:
            # no uri: dummy adapter, no migrations possible
            self._adapter = BaseAdapter(db=self,pool_size=0,
                                        uri='None',folder=folder,
                                        db_codec=db_codec, after_connection=after_connection)
            migrate = fake_migrate = False
        adapter = self._adapter
        self._uri_hash = hashlib_md5(adapter.uri).hexdigest()
        self.check_reserved = check_reserved
        if self.check_reserved:
            from reserved_sql_keywords import ADAPTERS as RSK
            self.RSK = RSK
        self._migrate = migrate
        self._fake_migrate = fake_migrate
        self._migrate_enabled = migrate_enabled
        self._fake_migrate_all = fake_migrate_all
        if auto_import or tables:
            self.import_table_definitions(adapter.folder,
                                          tables=tables)
7548 7549 @property
7550 - def tables(self):
7551 return self._tables
7552
    def import_table_definitions(self, path, migrate=False,
                                 fake_migrate=False, tables=None):
        """
        Reconstructs table definitions, either from an explicit *tables*
        list of define_table() kwarg-dicts, or from the pickled *.table
        migration files found under *path* for this connection's uri hash.
        """
        pattern = pjoin(path,self._uri_hash+'_*.table')
        if tables:
            for table in tables:
                self.define_table(**table)
        else:
            for filename in glob.glob(pattern):
                tfile = self._adapter.file_open(filename, 'r')
                try:
                    sql_fields = pickle.load(tfile)
                    # slice the table name out from between the uri-hash
                    # prefix and the '.table' suffix
                    # NOTE(review): index arithmetic looks off-by-one prone;
                    # verify against actual .table filenames.
                    name = filename[len(pattern)-7:-6]
                    # pair each field with its 'sortable' rank so the
                    # original column order can be restored
                    mf = [(value['sortable'],
                           Field(key,
                                 type=value['type'],
                                 length=value.get('length',None),
                                 notnull=value.get('notnull',False),
                                 unique=value.get('unique',False))) \
                              for key, value in sql_fields.iteritems()]
                    mf.sort(lambda a,b: cmp(a[0],b[0]))
                    self.define_table(name,*[item[1] for item in mf],
                                      **dict(migrate=migrate,
                                             fake_migrate=fake_migrate))
                finally:
                    self._adapter.file_close(tfile)
7578
7579 - def check_reserved_keyword(self, name):
7580 """ 7581 Validates ``name`` against SQL keywords 7582 Uses self.check_reserve which is a list of 7583 operators to use. 7584 self.check_reserved 7585 ['common', 'postgres', 'mysql'] 7586 self.check_reserved 7587 ['all'] 7588 """ 7589 for backend in self.check_reserved: 7590 if name.upper() in self.RSK[backend]: 7591 raise SyntaxError( 7592 'invalid table/column name "%s" is a "%s" reserved SQL/NOSQL keyword' % (name, backend.upper()))
7593
    def parse_as_rest(self,patterns,args,vars,queries=None,nested_select=True):
        """
        Maps a RESTful URL (args + vars) onto database queries according
        to *patterns*, returning a Row with status/response/error fields.

        EXAMPLE:

        db.define_table('person',Field('name'),Field('info'))
        db.define_table('pet',Field('ownedby',db.person),Field('name'),Field('info'))

        @request.restful()
        def index():
            def GET(*args,**vars):
                patterns = [
                    "/friends[person]",
                    "/{person.name}/:field",
                    "/{person.name}/pets[pet.ownedby]",
                    "/{person.name}/pets[pet.ownedby]/{pet.name}",
                    "/{person.name}/pets[pet.ownedby]/{pet.name}/:field",
                    ("/dogs[pet]", db.pet.info=='dog'),
                    ("/dogs[pet]/{pet.name.startswith}", db.pet.info=='dog'),
                    ]
                parser = db.parse_as_rest(patterns,args,vars)
                if parser.status == 200:
                    return dict(content=parser.response)
                else:
                    raise HTTP(parser.status,parser.error)

            def POST(table_name,**vars):
                if table_name == 'person':
                    return db.person.validate_and_insert(**vars)
                elif table_name == 'pet':
                    return db.pet.validate_and_insert(**vars)
                else:
                    raise HTTP(400)
            return locals()
        """

        db = self
        re1 = REGEX_SEARCH_PATTERN
        re2 = REGEX_SQUARE_BRACKETS

        def auto_table(table,base='',depth=0):
            # generate the standard pattern set for one table, recursing
            # into referencing tables up to *depth* levels
            patterns = []
            for field in db[table].fields:
                if base:
                    tag = '%s/%s' % (base,field.replace('_','-'))
                else:
                    tag = '/%s/%s' % (table.replace('_','-'),field.replace('_','-'))
                f = db[table][field]
                if not f.readable: continue
                if f.type=='id' or 'slug' in field or f.type.startswith('reference'):
                    tag += '/{%s.%s}' % (table,field)
                    patterns.append(tag)
                    patterns.append(tag+'/:field')
                elif f.type.startswith('boolean'):
                    tag += '/{%s.%s}' % (table,field)
                    patterns.append(tag)
                    patterns.append(tag+'/:field')
                elif f.type in ('float','double','integer','bigint'):
                    # numeric fields expose a range match (ge/lt)
                    tag += '/{%s.%s.ge}/{%s.%s.lt}' % (table,field,table,field)
                    patterns.append(tag)
                    patterns.append(tag+'/:field')
                elif f.type.startswith('list:'):
                    tag += '/{%s.%s.contains}' % (table,field)
                    patterns.append(tag)
                    patterns.append(tag+'/:field')
                elif f.type in ('date','datetime'):
                    # each date component extends the previous tag
                    tag+= '/{%s.%s.year}' % (table,field)
                    patterns.append(tag)
                    patterns.append(tag+'/:field')
                    tag+='/{%s.%s.month}' % (table,field)
                    patterns.append(tag)
                    patterns.append(tag+'/:field')
                    tag+='/{%s.%s.day}' % (table,field)
                    patterns.append(tag)
                    patterns.append(tag+'/:field')
                if f.type in ('datetime','time'):
                    tag+= '/{%s.%s.hour}' % (table,field)
                    patterns.append(tag)
                    patterns.append(tag+'/:field')
                    tag+='/{%s.%s.minute}' % (table,field)
                    patterns.append(tag)
                    patterns.append(tag+'/:field')
                    tag+='/{%s.%s.second}' % (table,field)
                    patterns.append(tag)
                    patterns.append(tag+'/:field')
                if depth>0:
                    for f in db[table]._referenced_by:
                        tag+='/%s[%s.%s]' % (table,f.tablename,f.name)
                        patterns.append(tag)
                        patterns += auto_table(table,base=tag,depth=depth-1)
            return patterns

        if patterns == 'auto':
            # build patterns for every non-auth table
            patterns=[]
            for table in db.tables:
                if not table.startswith('auth_'):
                    patterns.append('/%s[%s]' % (table,table))
                    patterns += auto_table(table,base='',depth=1)
        else:
            # expand any ':auto[...]' placeholder in user-supplied patterns
            i = 0
            while i<len(patterns):
                pattern = patterns[i]
                if not isinstance(pattern,str):
                    pattern = pattern[0]
                tokens = pattern.split('/')
                if tokens[-1].startswith(':auto') and re2.match(tokens[-1]):
                    new_patterns = auto_table(tokens[-1][tokens[-1].find('[')+1:-1],
                                              '/'.join(tokens[:-1]))
                    patterns = patterns[:i]+new_patterns+patterns[i+1:]
                    i += len(new_patterns)
                else:
                    i += 1
        if '/'.join(args) == 'patterns':
            # introspection endpoint: list all known patterns
            return Row({'status':200,'pattern':'list',
                        'error':None,'response':patterns})
        for pattern in patterns:
            basequery, exposedfields = None, []
            # a pattern may be (pattern, basequery[, exposedfields])
            if isinstance(pattern,tuple):
                if len(pattern)==2:
                    pattern, basequery = pattern
                elif len(pattern)>2:
                    pattern, basequery, exposedfields = pattern[0:3]
            otable=table=None
            if not isinstance(queries,dict):
                dbset=db(queries)
                if basequery is not None:
                    dbset = dbset(basequery)
            i=0
            tags = pattern[1:].split('/')
            if len(tags)!=len(args):
                continue
            for tag in tags:
                if re1.match(tag):
                    # '{table.field.op}' tag: narrow dbset by a field match
                    # print 're1:'+tag
                    tokens = tag[1:-1].split('.')
                    table, field = tokens[0], tokens[1]
                    if not otable or table == otable:
                        if len(tokens)==2 or tokens[2]=='eq':
                            query = db[table][field]==args[i]
                        elif tokens[2]=='ne':
                            query = db[table][field]!=args[i]
                        elif tokens[2]=='lt':
                            query = db[table][field]<args[i]
                        elif tokens[2]=='gt':
                            query = db[table][field]>args[i]
                        elif tokens[2]=='ge':
                            query = db[table][field]>=args[i]
                        elif tokens[2]=='le':
                            query = db[table][field]<=args[i]
                        elif tokens[2]=='year':
                            query = db[table][field].year()==args[i]
                        elif tokens[2]=='month':
                            query = db[table][field].month()==args[i]
                        elif tokens[2]=='day':
                            query = db[table][field].day()==args[i]
                        elif tokens[2]=='hour':
                            query = db[table][field].hour()==args[i]
                        elif tokens[2]=='minute':
                            query = db[table][field].minutes()==args[i]
                        elif tokens[2]=='second':
                            query = db[table][field].seconds()==args[i]
                        elif tokens[2]=='startswith':
                            query = db[table][field].startswith(args[i])
                        elif tokens[2]=='contains':
                            query = db[table][field].contains(args[i])
                        else:
                            raise RuntimeError("invalid pattern: %s" % pattern)
                        # optional 4th token 'not' negates the query
                        if len(tokens)==4 and tokens[3]=='not':
                            query = ~query
                        elif len(tokens)>=4:
                            raise RuntimeError("invalid pattern: %s" % pattern)
                        if not otable and isinstance(queries,dict):
                            dbset = db(queries[table])
                            if basequery is not None:
                                dbset = dbset(basequery)
                        dbset=dbset(query)
                    else:
                        raise RuntimeError("missing relation in pattern: %s" % pattern)
                elif re2.match(tag) and args[i]==tag[:tag.find('[')]:
                    # 'name[table.field]' tag: follow a relation
                    ref = tag[tag.find('[')+1:-1]
                    if '.' in ref and otable:
                        table,field = ref.split('.')
                        selfld = '_id'
                        if db[table][field].type.startswith('reference '):
                            refs = [ x.name for x in db[otable] if x.type == db[table][field].type ]
                        else:
                            refs = [ x.name for x in db[table]._referenced_by if x.tablename==otable ]
                        if refs:
                            selfld = refs[0]
                        if nested_select:
                            try:
                                dbset=db(db[table][field].belongs(dbset._select(db[otable][selfld])))
                            except ValueError:
                                return Row({'status':400,'pattern':pattern,
                                            'error':'invalid path','response':None})
                        else:
                            items = [item.id for item in dbset.select(db[otable][selfld])]
                            dbset=db(db[table][field].belongs(items))
                    else:
                        table = ref
                        if not otable and isinstance(queries,dict):
                            dbset = db(queries[table])
                        dbset=dbset(db[table])
                elif tag==':field' and table:
                    # ':field' tag: return the single requested column
                    # print 're3:'+tag
                    field = args[i]
                    if not field in db[table]: break
                    # hand-built patterns should respect .readable=False as well
                    if not db[table][field].readable:
                        return Row({'status':418,'pattern':pattern,
                                    'error':'I\'m a teapot','response':None})
                    try:
                        distinct = vars.get('distinct', False) == 'True'
                        offset = long(vars.get('offset',None) or 0)
                        limits = (offset,long(vars.get('limit',None) or 1000)+offset)
                    except ValueError:
                        return Row({'status':400,'error':'invalid limits','response':None})
                    items = dbset.select(db[table][field], distinct=distinct, limitby=limits)
                    if items:
                        return Row({'status':200,'response':items,
                                    'pattern':pattern})
                    else:
                        return Row({'status':404,'pattern':pattern,
                                    'error':'no record found','response':None})
                elif tag != args[i]:
                    # literal path segment mismatch: try the next pattern
                    break
                otable = table
                i += 1
            if i==len(tags) and table:
                # whole pattern matched: select the rows
                ofields = vars.get('order',db[table]._id.name).split('|')
                try:
                    orderby = [db[table][f] if not f.startswith('~') else ~db[table][f[1:]] for f in ofields]
                except (KeyError, AttributeError):
                    return Row({'status':400,'error':'invalid orderby','response':None})
                if exposedfields:
                    fields = [field for field in db[table] if str(field).split('.')[-1] in exposedfields and field.readable]
                else:
                    fields = [field for field in db[table] if field.readable]
                count = dbset.count()
                try:
                    offset = long(vars.get('offset',None) or 0)
                    limits = (offset,long(vars.get('limit',None) or 1000)+offset)
                except ValueError:
                    return Row({'status':400,'error':'invalid limits','response':None})
                if count > limits[1]-limits[0]:
                    return Row({'status':400,'error':'too many records','response':None})
                try:
                    response = dbset.select(limitby=limits,orderby=orderby,*fields)
                except ValueError:
                    return Row({'status':400,'pattern':pattern,
                                'error':'invalid path','response':None})
                return Row({'status':200,'response':response,
                            'pattern':pattern,'count':count})
        return Row({'status':400,'error':'no matching pattern','response':None})
7847
    def define_table(
        self,
        tablename,
        *fields,
        **args
        ):
        """
        Defines (or, with lazy_tables, merely registers) a table.

        Returns the Table instance, or None when the definition was
        deferred by lazy_tables and the table has not been accessed yet.

        :raises SyntaxError: on an invalid/duplicate name or invalid args
        """
        if not fields and 'fields' in args:
            fields = args.get('fields',())
        if not isinstance(tablename, str):
            if isinstance(tablename, unicode):
                try:
                    tablename = str(tablename)
                except UnicodeEncodeError:
                    raise SyntaxError("invalid unicode table name")
            else:
                raise SyntaxError("missing table name")
        elif hasattr(self,tablename) or tablename in self.tables:
            if not args.get('redefine',False):
                raise SyntaxError('table already defined: %s' % tablename)
        elif tablename.startswith('_') or hasattr(self,tablename) or \
                REGEX_PYTHON_KEYWORDS.match(tablename):
            raise SyntaxError('invalid table name: %s' % tablename)
        elif self.check_reserved:
            self.check_reserved_keyword(tablename)
        else:
            # NOTE(review): because of the elif chain, the TABLE_ARGS
            # validation only runs when no earlier branch matched --
            # confirm this is the intended behavior.
            invalid_args = set(args)-TABLE_ARGS
            if invalid_args:
                raise SyntaxError('invalid table "%s" attributes: %s' \
                    % (tablename,invalid_args))
        if self._lazy_tables and not tablename in self._LAZY_TABLES:
            # stash the definition; __getattr__ builds it on first access
            self._LAZY_TABLES[tablename] = (tablename,fields,args)
            table = None
        else:
            table = self.lazy_define_table(tablename,*fields,**args)
        if not tablename in self.tables:
            self.tables.append(tablename)
        return table
7885
    def lazy_define_table(
        self,
        tablename,
        *fields,
        **args
        ):
        """
        Actually constructs the Table instance (called directly by
        define_table, or deferred until first access with lazy_tables).
        """
        args_get = args.get
        common_fields = self._common_fields
        if common_fields:
            fields = list(fields) + list(common_fields)

        table_class = args_get('table_class',Table)
        table = table_class(self, tablename, *fields, **args)
        table._actual = True
        self[tablename] = table
        # must follow above line to handle self references
        table._create_references()
        for field in table:
            if field.requires == DEFAULT:
                field.requires = sqlhtml_validators(field)

        migrate = self._migrate_enabled and args_get('migrate',self._migrate)
        # NOTE(review): `and` binds tighter than `or`, so the datastore
        # clause bypasses the migrate flag entirely -- confirm intended.
        if migrate and not self._uri in (None,'None') \
                or self._adapter.dbengine=='google:datastore':
            fake_migrate = self._fake_migrate_all or \
                args_get('fake_migrate',self._fake_migrate)
            polymodel = args_get('polymodel',None)
            try:
                # serialize DDL across threads
                GLOBAL_LOCKER.acquire()
                self._lastsql = self._adapter.create_table(
                    table,migrate=migrate,
                    fake_migrate=fake_migrate,
                    polymodel=polymodel)
            finally:
                GLOBAL_LOCKER.release()
        else:
            table._dbt = None
        on_define = args_get('on_define',None)
        if on_define: on_define(table)
        return table
7926
7927 - def as_dict(self, flat=False, sanitize=True):
7928 db_uid = uri = None 7929 if not sanitize: 7930 uri, db_uid = (self._uri, self._db_uid) 7931 db_as_dict = dict(tables=[], uri=uri, db_uid=db_uid, 7932 **dict([(k, getattr(self, "_" + k, None)) 7933 for k in 'pool_size','folder','db_codec', 7934 'check_reserved','migrate','fake_migrate', 7935 'migrate_enabled','fake_migrate_all', 7936 'decode_credentials','driver_args', 7937 'adapter_args', 'attempts', 7938 'bigint_id','debug','lazy_tables', 7939 'do_connect'])) 7940 for table in self: 7941 db_as_dict["tables"].append(table.as_dict(flat=flat, 7942 sanitize=sanitize)) 7943 return db_as_dict
7944
7945 - def as_xml(self, sanitize=True):
7946 if not have_serializers: 7947 raise ImportError("No xml serializers available") 7948 d = self.as_dict(flat=True, sanitize=sanitize) 7949 return serializers.xml(d)
7950
7951 - def as_json(self, sanitize=True):
7952 if not have_serializers: 7953 raise ImportError("No json serializers available") 7954 d = self.as_dict(flat=True, sanitize=sanitize) 7955 return serializers.json(d)
7956
7957 - def as_yaml(self, sanitize=True):
7958 if not have_serializers: 7959 raise ImportError("No YAML serializers available") 7960 d = self.as_dict(flat=True, sanitize=sanitize) 7961 return serializers.yaml(d)
7962
    def __contains__(self, tablename):
        """True when *tablename* is among the tables defined on this DAL."""
        try:
            return tablename in self.tables
        except AttributeError:
            # The instance has no .tables attribute yet
            return False

    # dict-like alias (Python 2 style)
    has_key = __contains__
7972 - def get(self,key,default=None):
7973 return self.__dict__.get(key,default)
7974
7975 - def __iter__(self):
7976 for tablename in self.tables: 7977 yield self[tablename]
7978
7979 - def __getitem__(self, key):
7980 return self.__getattr__(str(key))
7981
    def __getattr__(self, key):
        # Resolve lazily-registered tables on first attribute access.
        # ogetattr (presumably object.__getattribute__, defined earlier in
        # this file -- TODO confirm) avoids re-entering this method while
        # probing the lazy-table registry.
        if ogetattr(self,'_lazy_tables') and \
                key in ogetattr(self,'_LAZY_TABLES'):
            # pop so the table is built exactly once
            tablename, fields, args = self._LAZY_TABLES.pop(key)
            return self.lazy_define_table(tablename,*fields,**args)
        return ogetattr(self, key)
7988
7989 - def __setitem__(self, key, value):
7990 osetattr(self, str(key), value)
7991
    def __setattr__(self, key, value):
        # Non-underscore attributes are tables/aliases: forbid silent
        # redefinition (use define_table(..., redefine=True) instead).
        if key[:1]!='_' and key in self:
            raise SyntaxError(
                'Object %s exists and cannot be redefined' % key)
        osetattr(self,key,value)

    # del db[tablename] removes the attribute directly
    __delitem__ = object.__delattr__
8000 - def __repr__(self):
8001 if hasattr(self,'_uri'): 8002 return '<DAL uri="%s">' % hide_password(str(self._uri)) 8003 else: 8004 return '<DAL db_uid="%s">' % self._db_uid
8005
8006 - def smart_query(self,fields,text):
8007 return Set(self, smart_query(fields,text))
8008
    def __call__(self, query=None, ignore_common_filters=None):
        """
        Returns a Set for *query*: db(query).select(...), etc.

        A Table argument becomes its id-query, a Field becomes
        `field != None`, and a dict form may carry an
        'ignore_common_filters' key that overrides the keyword argument.
        """
        if isinstance(query,Table):
            query = self._adapter.id_query(query)
        elif isinstance(query,Field):
            query = query!=None
        elif isinstance(query, dict):
            icf = query.get("ignore_common_filters")
            if icf: ignore_common_filters = icf
        return Set(self, query, ignore_common_filters=ignore_common_filters)
8018
8019 - def commit(self):
8020 self._adapter.commit()
8021
8022 - def rollback(self):
8023 self._adapter.rollback()
8024
    def close(self):
        """Closes the adapter connection and removes this DAL from the
        per-thread instance registry (dropping the group when empty)."""
        self._adapter.close()
        if self._db_uid in THREAD_LOCAL.db_instances:
            db_group = THREAD_LOCAL.db_instances[self._db_uid]
            db_group.remove(self)
            if not db_group:
                del THREAD_LOCAL.db_instances[self._db_uid]
8032
    def executesql(self, query, placeholders=None, as_dict=False,
                   fields=None, colnames=None):
        """
        Executes an arbitrary (raw SQL) query through the adapter.

        placeholders is optional and will always be None.
        If using raw SQL with placeholders, placeholders may be
        a sequence of values to be substituted in
        or, (if supported by the DB driver), a dictionary with keys
        matching named placeholders in your SQL.

        Added 2009-12-05 "as_dict" optional argument. Will always be
        None when using DAL. If using raw SQL can be set to True
        and the results cursor returned by the DB driver will be
        converted to a sequence of dictionaries keyed with the db
        field names. Tested with SQLite but should work with any database
        since the cursor.description used to get field names is part of the
        Python dbi 2.0 specs. Results returned with as_dict=True are
        the same as those returned when applying .to_list() to a DAL query.

        [{field1: value1, field2: value2}, {field1: value1b, field2: value2b}]

        Added 2012-08-24 "fields" and "colnames" optional arguments. If either
        is provided, the results cursor returned by the DB driver will be
        converted to a DAL Rows object using the db._adapter.parse() method.

        The "fields" argument is a list of DAL Field objects that match the
        fields returned from the DB. The Field objects should be part of one or
        more Table objects defined on the DAL object. The "fields" list can
        include one or more DAL Table objects in addition to or instead of
        including Field objects, or it can be just a single table (not in a
        list). In that case, the Field objects will be extracted from the
        table(s).

        Instead of specifying the "fields" argument, the "colnames" argument
        can be specified as a list of field names in tablename.fieldname format.
        Again, these should represent tables and fields defined on the DAL
        object.

        It is also possible to specify both "fields" and the associated
        "colnames". In that case, "fields" can also include DAL Expression
        objects in addition to Field objects. For Field objects in "fields",
        the associated "colnames" must still be in tablename.fieldname format.
        For Expression objects in "fields", the associated "colnames" can
        be any arbitrary labels.

        Note, the DAL Table objects referred to by "fields" or "colnames" can
        be dummy tables and do not have to represent any real tables in the
        database. Also, note that the "fields" and "colnames" must be in the
        same order as the fields in the results cursor returned from the DB.
        """
        adapter = self._adapter
        if placeholders:
            adapter.execute(query, placeholders)
        else:
            adapter.execute(query)
        if as_dict:
            if not hasattr(adapter.cursor,'description'):
                raise RuntimeError("database does not support executesql(...,as_dict=True)")
            # Non-DAL legacy db query, converts cursor results to dict.
            # sequence of 7-item sequences. each sequence tells about a column.
            # first item is always the field name according to Python Database API specs
            columns = adapter.cursor.description
            # reduce the column info down to just the field names
            fields = [f[0] for f in columns]
            # will hold our finished resultset in a list
            data = adapter._fetchall()
            # convert the list for each row into a dictionary so it's
            # easier to work with. row['field_name'] rather than row[0]
            return [dict(zip(fields,row)) for row in data]
        try:
            data = adapter._fetchall()
        except:
            # statement produced no result set (e.g. DDL/DML)
            return None
        if fields or colnames:
            fields = [] if fields is None else fields
            if not isinstance(fields, list):
                fields = [fields]
            extracted_fields = []
            for field in fields:
                if isinstance(field, Table):
                    extracted_fields.extend([f for f in field])
                else:
                    extracted_fields.append(field)
            if not colnames:
                colnames = ['%s.%s' % (f.tablename, f.name)
                            for f in extracted_fields]
            data = adapter.parse(
                data, fields=extracted_fields, colnames=colnames)
        return data
8121
8122 - def _remove_references_to(self, thistable):
8123 for table in self: 8124 table._referenced_by = [field for field in table._referenced_by 8125 if not field.table==thistable]
8126
8127 - def export_to_csv_file(self, ofile, *args, **kwargs):
8128 step = long(kwargs.get('max_fetch_rows,',500)) 8129 write_colnames = kwargs['write_colnames'] = \ 8130 kwargs.get("write_colnames", True) 8131 for table in self.tables: 8132 ofile.write('TABLE %s\r\n' % table) 8133 query = self._adapter.id_query(self[table]) 8134 nrows = self(query).count() 8135 kwargs['write_colnames'] = write_colnames 8136 for k in range(0,nrows,step): 8137 self(query).select(limitby=(k,k+step)).export_to_csv_file( 8138 ofile, *args, **kwargs) 8139 kwargs['write_colnames'] = False 8140 ofile.write('\r\n\r\n') 8141 ofile.write('END')
8142
    def import_from_csv_file(self, ifile, id_map=None, null='<NULL>',
                             unique='uuid', map_tablenames=None,
                             ignore_missing_tables=False,
                             *args, **kwargs):
        """
        Reads a multi-table CSV dump (as written by export_to_csv_file):
        'TABLE <name>' headers delimit per-table sections and 'END' stops
        the import. *map_tablenames* renames tables on import; mapping a
        name to None (or passing ignore_missing_tables=True) skips that
        section instead of failing.
        """
        #if id_map is None: id_map={}
        id_offset = {} # only used if id_map is None
        map_tablenames = map_tablenames or {}
        for line in ifile:
            line = line.strip()
            if not line:
                continue
            elif line == 'END':
                return
            elif not line.startswith('TABLE ') or \
                    not line[6:] in self.tables:
                raise SyntaxError('invalid file format')
            else:
                tablename = line[6:]
                tablename = map_tablenames.get(tablename,tablename)
                if tablename is not None and tablename in self.tables:
                    # delegate the section body to the Table-level importer
                    self[tablename].import_from_csv_file(
                        ifile, id_map, null, unique, id_offset,
                        *args, **kwargs)
                elif tablename is None or ignore_missing_tables:
                    # skip all non-empty lines
                    for line in ifile:
                        if not line.strip():
                            break
                else:
                    raise RuntimeError("Unable to import table that does not exist.\nTry db.import_from_csv_file(..., map_tablenames={'table':'othertable'},ignore_missing_tables=True)")
8173
def DAL_unpickler(db_uid):
    """Rebuilds a pickled DAL as a connectionless '<zombie>' instance
    bound to *db_uid*."""
    return DAL('<zombie>', db_uid=db_uid)
8177
def DAL_pickler(db):
    """Pickles a DAL by reference: only its db_uid is stored."""
    return DAL_unpickler, (db._db_uid,)
# pickle DAL instances by db_uid only; they come back as '<zombie>' copies
copyreg.pickle(DAL, DAL_pickler, DAL_unpickler)
class SQLALL(object):
    """
    Helper class providing a comma-separated string having all the field names
    (prefixed by table name and '.')

    normally only called from within gluon.sql
    """

    def __init__(self, table):
        # keep a reference to the owning Table; fields are read lazily
        self._table = table

    def __str__(self):
        return ', '.join(str(field) for field in self._table)
8196
# class Reference(int):
class Reference(long):
    """
    A reference (foreign key) value: behaves as the integer id of the
    referenced record and lazily fetches the full record on attribute
    or item access.
    """

    def __allocate(self):
        """Fetches and caches the referenced record; raises on a broken
        reference."""
        if not self._record:
            self._record = self._table[long(self)]
        if not self._record:
            raise RuntimeError(
                "Using a recursive select but encountered a broken reference: %s %d"%(self._table, long(self)))

    def __getattr__(self, key, default=None):
        # BUGFIX: `default` parameter added (backward compatible) so that
        # get() can delegate here -- the original two-argument call raised
        # TypeError because __getattr__ accepted only (self, key).
        if key == 'id':
            return long(self)
        if key in self._table:
            self.__allocate()
        if self._record:
            return self._record.get(key, default) # to deal with case self.update_record()
        else:
            return default

    def get(self, key, default=None):
        """dict.get-style access to the referenced record's fields."""
        return self.__getattr__(key, default)

    def __setattr__(self, key, value):
        # underscore attributes are internal state; everything else writes
        # through to the (lazily fetched) record
        if key.startswith('_'):
            long.__setattr__(self, key, value)
            return
        self.__allocate()
        self._record[key] = value

    def __getitem__(self, key):
        if key == 'id':
            return long(self)
        self.__allocate()
        return self._record.get(key, None)

    def __setitem__(self,key,value):
        self.__allocate()
        self._record[key] = value
8236
def Reference_unpickler(data):
    """Rebuilds the plain integer id of a pickled Reference from its
    marshal byte string."""
    return marshal.loads(data)
8240
def Reference_pickler(data):
    """
    Pickles a Reference as its bare integer id (marshal format) so the
    lazily-fetched record is never serialized.
    """
    try:
        marshal_dump = marshal.dumps(long(data))
    except AttributeError:
        # fallback when marshal.dumps is unavailable -- presumably for
        # restricted runtimes; TODO confirm which environments hit this
        marshal_dump = 'i%s' % struct.pack('<i', long(data))
    return (Reference_unpickler, (marshal_dump,))
# register by-id pickling for Reference values
copyreg.pickle(Reference, Reference_pickler, Reference_unpickler)
class MethodAdder(object):
    """
    Decorator factory exposed as table.add_method: attaches the decorated
    function to the Table instance as a bound method, e.g.

        @db.mytable.add_method.my_name
        def my_name(table, ...): ...
    """
    def __init__(self,table):
        self.table = table
    def __call__(self):
        # @table.add_method() -- register under the function's own name
        return self.register()
    def __getattr__(self,method_name):
        # @table.add_method.some_name -- register under `some_name`
        return self.register(method_name)
    def register(self,method_name=None):
        def _decorated(f):
            instance = self.table
            import types
            # Python 2 bound-method constructor: (function, instance, class)
            method = types.MethodType(f, instance, instance.__class__)
            name = method_name or f.func_name
            setattr(instance, name, method)
            return f
        return _decorated
8266
8267 -class Table(object):
8268 8269 """ 8270 an instance of this class represents a database table 8271 8272 Example:: 8273 8274 db = DAL(...) 8275 db.define_table('users', Field('name')) 8276 db.users.insert(name='me') # print db.users._insert(...) to see SQL 8277 db.users.drop() 8278 """ 8279
    def __init__(
        self,
        db,
        tablename,
        *fields,
        **args
        ):
        """
        Initializes the table and performs checking on the provided fields.

        Each table will have automatically an 'id'.

        If a field is of type Table, the fields (excluding 'id') from that table
        will be used instead.

        :raises SyntaxError: when a supplied field is of incorrect type.
        """
        self._actual = False # set to True by define_table()
        self._tablename = tablename
        self._ot = args.get('actual_name')
        self._sequence_name = args.get('sequence_name') or \
            db and db._adapter.sequence_name(tablename)
        self._trigger_name = args.get('trigger_name') or \
            db and db._adapter.trigger_name(tablename)
        self._common_filter = args.get('common_filter')
        self._format = args.get('format')
        self._singular = args.get(
            'singular',tablename.replace('_',' ').capitalize())
        self._plural = args.get(
            'plural',pluralize(self._singular.lower()).capitalize())
        # horrible but for backard compatibility of appamdin:
        if 'primarykey' in args and args['primarykey'] is not None:
            self._primarykey = args.get('primarykey')

        # insert/update/delete callback lists (run by Set/Table operations)
        self._before_insert = []
        self._before_update = [Set.delete_uploaded_files]
        self._before_delete = [Set.delete_uploaded_files]
        self._after_insert = []
        self._after_update = []
        self._after_delete = []

        self.add_method = MethodAdder(self)

        fieldnames,newfields=set(),[]
        _primarykey = getattr(self, '_primarykey', None)
        if _primarykey is not None:
            if not isinstance(_primarykey, list):
                raise SyntaxError(
                    "primarykey must be a list of fields from table '%s'" \
                    % tablename)
            if len(_primarykey)==1:
                self._id = [f for f in fields if isinstance(f,Field) \
                                and f.name==_primarykey[0]][0]
        elif not [f for f in fields if (isinstance(f,Field) and
                  f.type=='id') or (isinstance(f, dict) and
                  f.get("type", None)=="id")]:
            # no explicit id field: inject the automatic one
            field = Field('id', 'id')
            newfields.append(field)
            fieldnames.add('id')
            self._id = field
        virtual_fields = []
        def include_new(field):
            # accept a concrete field, tracking the id field if seen
            newfields.append(field)
            fieldnames.add(field.name)
            if field.type=='id':
                self._id = field
        for field in fields:
            if isinstance(field, (FieldMethod, FieldVirtual)):
                virtual_fields.append(field)
            elif isinstance(field, Field) and not field.name in fieldnames:
                if field.db is not None:
                    # field already bound to another db/table: work on a copy
                    field = copy.copy(field)
                include_new(field)
            elif isinstance(field, dict) and not field['fieldname'] in fieldnames:
                include_new(Field(**field))
            elif isinstance(field, Table):
                # inherit all non-id fields from another table
                table = field
                for field in table:
                    if not field.name in fieldnames and not field.type=='id':
                        t2 = not table._actual and self._tablename
                        include_new(field.clone(point_self_references_to=t2))
            elif not isinstance(field, (Field, Table)):
                raise SyntaxError(
                    'define_table argument is not a Field or Table: %s' % field)
        fields = newfields
        self._db = db
        tablename = tablename
        self._fields = SQLCallableList()
        self.virtualfields = []
        fields = list(fields)

        if db and db._adapter.uploads_in_blob==True:
            # adapters storing uploads in the db need a companion blob field
            uploadfields = [f.name for f in fields if f.type=='blob']
            for field in fields:
                fn = field.uploadfield
                if isinstance(field, Field) and field.type == 'upload'\
                        and fn is True:
                    fn = field.uploadfield = '%s_blob' % field.name
                if isinstance(fn,str) and not fn in uploadfields:
                    fields.append(Field(fn,'blob',default='',
                                        writable=False,readable=False))

        lower_fieldnames = set()
        reserved = dir(Table) + ['fields']
        if (db and db.check_reserved):
            check_reserved = db.check_reserved_keyword
        else:
            def check_reserved(field_name):
                if field_name in reserved:
                    raise SyntaxError("field name %s not allowed" % field_name)
        for field in fields:
            field_name = field.name
            check_reserved(field_name)
            # duplicates are detected case-insensitively
            fn_lower = field_name.lower()
            if fn_lower in lower_fieldnames:
                raise SyntaxError("duplicate field %s in table %s" \
                                  % (field_name, tablename))
            else:
                lower_fieldnames.add(fn_lower)

            self.fields.append(field_name)
            self[field_name] = field
            if field.type == 'id':
                self['id'] = field
            # bind the field to this table/db
            field.tablename = field._tablename = tablename
            field.table = field._table = self
            field.db = field._db = db
        self.ALL = SQLALL(self)

        if _primarykey is not None:
            for k in _primarykey:
                if k not in self.fields:
                    raise SyntaxError(
                        "primarykey must be a list of fields from table '%s " % tablename)
                else:
                    self[k].notnull = True
        for field in virtual_fields:
            self[field.name] = field

    @property
    def fields(self):
        # Read-only accessor for the ordered SQLCallableList of field
        # names built in __init__; kept behind a property so the
        # __setattr__ redefinition guard cannot be bypassed.
        return self._fields
8422
    def update(self,*args,**kwargs):
        # Tables deliberately reject dict-style .update(); use
        # db(query).update(...) or table[id] = dict(...) instead.
        raise RuntimeError("Syntax Not Supported")
8425
    def _enable_record_versioning(self,
                                  archive_db=None,
                                  archive_name = '%(tablename)s_archive',
                                  is_active = 'is_active',
                                  current_record = 'current_record',
                                  current_record_label = None):
        """
        Enable record versioning on this table.

        Creates (if missing) an archive table named by ``archive_name``
        that holds a clone of every field plus a ``current_record``
        reference back to this table, and installs a _before_update
        callback that copies the old row into the archive.  When an
        ``is_active`` field exists, deletes become soft deletes and the
        table's common_filter hides inactive rows.
        """
        db = self._db
        archive_db = archive_db or db
        archive_name = archive_name % dict(tablename=self._tablename)
        if archive_name in archive_db.tables():
            return # do not try define the archive if already exists
        fieldnames = self.fields()
        same_db = archive_db is db
        # cross-database archives cannot hold real references; fall back
        # to plain bigint ids
        field_type = self if same_db else 'bigint'
        clones = []
        for field in self:
            nfk = same_db or not field.type.startswith('reference')
            clones.append(field.clone(
                unique=False, type=field.type if nfk else 'bigint'))
        archive_db.define_table(
            archive_name, Field(current_record,field_type,
                                label=current_record_label), *clones)
        # default-argument trick binds the current values into the lambda
        self._before_update.append(
            lambda qset,fs,db=archive_db,an=archive_name,cn=current_record:
                archive_record(qset,fs,db[an],cn))
        if is_active and is_active in fieldnames:
            # soft delete: flip is_active instead of removing the row
            self._before_delete.append(
                lambda qset: qset.update(is_active=False))
            newquery = lambda query, t=self, name=self._tablename: \
                reduce(AND,[db[tn].is_active == True
                            for tn in db._adapter.tables(query)
                            if tn==name or getattr(db[tn],'_ot',None)==name])
            query = self._common_filter
            if query:
                # NOTE(review): this ANDs a Query with a callable filter;
                # presumably the adapter resolves callables later — confirm
                newquery = query & newquery
            self._common_filter = newquery
8462
8463 - def _validate(self,**vars):
8464 errors = Row() 8465 for key,value in vars.iteritems(): 8466 value,error = self[key].validate(value) 8467 if error: 8468 errors[key] = error 8469 return errors
8470
8471 - def _create_references(self):
8472 db = self._db 8473 pr = db._pending_references 8474 self._referenced_by = [] 8475 self._references = [] 8476 for field in self: 8477 fieldname = field.name 8478 field_type = field.type 8479 if isinstance(field_type,str) and field_type[:10] == 'reference ': 8480 ref = field_type[10:].strip() 8481 if not ref: 8482 SyntaxError('Table: reference to nothing: %s' %ref) 8483 if '.' in ref: 8484 rtablename, throw_it,rfieldname = ref.partition('.') 8485 else: 8486 rtablename, rfieldname = ref, None 8487 if not rtablename in db: 8488 pr[rtablename] = pr.get(rtablename,[]) + [field] 8489 continue 8490 rtable = db[rtablename] 8491 if rfieldname: 8492 if not hasattr(rtable,'_primarykey'): 8493 raise SyntaxError( 8494 'keyed tables can only reference other keyed tables (for now)') 8495 if rfieldname not in rtable.fields: 8496 raise SyntaxError( 8497 "invalid field '%s' for referenced table '%s' in table '%s'" \ 8498 % (rfieldname, rtablename, self._tablename)) 8499 rfield = rtable[rfieldname] 8500 else: 8501 rfield = rtable._id 8502 rtable._referenced_by.append(field) 8503 field.referent = rfield 8504 self._references.append(field) 8505 else: 8506 field.referent = None 8507 for referee in pr.get(self._tablename,[]): 8508 self._referenced_by.append(referee)
8509
8510 - def _filter_fields(self, record, id=False):
8511 return dict([(k, v) for (k, v) in record.iteritems() if k 8512 in self.fields and (self[k].type!='id' or id)])
8513
8514 - def _build_query(self,key):
8515 """ for keyed table only """ 8516 query = None 8517 for k,v in key.iteritems(): 8518 if k in self._primarykey: 8519 if query: 8520 query = query & (self[k] == v) 8521 else: 8522 query = (self[k] == v) 8523 else: 8524 raise SyntaxError( 8525 'Field %s is not part of the primary key of %s' % \ 8526 (k,self._tablename)) 8527 return query
8528
    def __getitem__(self, key):
        # Record-style access: table[id], table[pk-dict] or
        # table[fieldname] (attribute fallback).
        if not key:
            return None
        elif isinstance(key, dict):
            """ for keyed table """
            query = self._build_query(key)
            return self._db(query).select(limitby=(0,1), orderby_on_limitby=False).first()
        elif str(key).isdigit() or 'google' in DRIVERS and isinstance(key, Key):
            # numeric id, or a datastore Key when running on GAE
            return self._db(self._id == key).select(limitby=(0,1), orderby_on_limitby=False).first()
        elif key:
            # fall back to attribute access (Field objects, metadata)
            return ogetattr(self, str(key))
8540
    def __call__(self, key=DEFAULT, **kwargs):
        # Fetch a single record by id, Query or field=value filters;
        # returns None when nothing matches.
        for_update = kwargs.get('_for_update',False)
        if '_for_update' in kwargs: del kwargs['_for_update']

        orderby = kwargs.get('_orderby',None)
        if '_orderby' in kwargs: del kwargs['_orderby']

        if not key is DEFAULT:
            if isinstance(key, Query):
                record = self._db(key).select(
                    limitby=(0,1),for_update=for_update, orderby=orderby, orderby_on_limitby=False).first()
            elif not str(key).isdigit():
                # a non-numeric key cannot be an id
                record = None
            else:
                record = self._db(self._id == key).select(
                    limitby=(0,1),for_update=for_update, orderby=orderby, orderby_on_limitby=False).first()
            if record:
                # remaining kwargs act as extra equality filters
                for k,v in kwargs.iteritems():
                    if record[k]!=v: return None
            return record
        elif kwargs:
            # no key given: AND together field==value conditions
            query = reduce(lambda a,b:a&b,[self[k]==v for k,v in kwargs.iteritems()])
            return self._db(query).select(limitby=(0,1),for_update=for_update, orderby=orderby, orderby_on_limitby=False).first()
        else:
            return None
8566
    def __setitem__(self, key, value):
        # table[key] = value provides insert-or-update semantics.
        if isinstance(key, dict) and isinstance(value, dict):
            """ option for keyed table """
            if set(key.keys()) == set(self._primarykey):
                value = self._filter_fields(value)
                kv = {}
                kv.update(value)
                kv.update(key)
                # try insert first; if it fails, update the existing record
                if not self.insert(**kv):
                    query = self._build_query(key)
                    self._db(query).update(**self._filter_fields(value))
            else:
                raise SyntaxError(
                    'key must have all fields from primary key: %s'%\
                    (self._primarykey))
        elif str(key).isdigit():
            if key == 0:
                # id 0 is the conventional "insert a new record" key
                self.insert(**self._filter_fields(value))
            elif self._db(self._id == key)\
                    .update(**self._filter_fields(value)) is None:
                raise SyntaxError('No such record: %s' % key)
        else:
            if isinstance(key, dict):
                raise SyntaxError(
                    'value must be a dictionary: %s' % value)
            # otherwise: plain attribute assignment
            osetattr(self, str(key), value)

    # attribute access delegates to record/field lookup
    __getattr__ = __getitem__
    def __setattr__(self, key, value):
        # Protect already-defined public attributes (fields, methods)
        # from accidental redefinition; '_'-prefixed names are free.
        if key[:1]!='_' and key in self:
            raise SyntaxError('Object exists and cannot be redefined: %s' % key)
        osetattr(self,key,value)
8600
8601 - def __delitem__(self, key):
8602 if isinstance(key, dict): 8603 query = self._build_query(key) 8604 if not self._db(query).delete(): 8605 raise SyntaxError('No such record: %s' % key) 8606 elif not str(key).isdigit() or \ 8607 not self._db(self._id == key).delete(): 8608 raise SyntaxError('No such record: %s' % key)
8609
    def __contains__(self,key):
        # membership means "has attribute": true for field names and any
        # other attribute set on the table
        return hasattr(self,key)

    # legacy dict-style alias
    has_key = __contains__
    def items(self):
        # expose the raw attribute dictionary (fields plus metadata)
        return self.__dict__.items()
8617
8618 - def __iter__(self):
8619 for fieldname in self.fields: 8620 yield self[fieldname]
8621
    def iteritems(self):
        # Python 2 style iterator over the raw attribute dict
        return self.__dict__.iteritems()
8624 8625
8626 - def __repr__(self):
8627 return '<Table %s (%s)>' % (self._tablename,','.join(self.fields()))
8628
    def __str__(self):
        # When the table is an alias, _ot holds the original name:
        # render "<original> AS <alias>" (Oracle omits the AS keyword).
        if self._ot is not None:
            ot = self._db._adapter.QUOTE_TEMPLATE % self._ot
            if 'Oracle' in str(type(self._db._adapter)):
                return '%s %s' % (ot, self._tablename)
            return '%s AS %s' % (ot, self._tablename)
        return self._tablename
8636
    def _drop(self, mode = ''):
        # return the DROP TABLE SQL without executing it
        return self._db._adapter._drop(self, mode)
8639
    def drop(self, mode = ''):
        # drop the table in the backend (mode is adapter-specific)
        return self._db._adapter.drop(self,mode)
8642
    def _listify(self,fields,update=False):
        """
        Convert a {name: value} dict into the list of (Field, value)
        pairs the adapter expects, applying filter_in, insert defaults
        (or update values when ``update`` is True) and computed fields.

        :raises SyntaxError: unknown field name, or a required computed
            field that could not be computed
        :raises RuntimeError: required field missing on insert
        """
        new_fields = {} # format: new_fields[name] = (field,value)

        # store all fields passed as input in new_fields
        for name in fields:
            if not name in self.fields:
                if name != 'id':
                    raise SyntaxError(
                        'Field %s does not belong to the table' % name)
            else:
                field = self[name]
                value = fields[name]
                if field.filter_in:
                    value = field.filter_in(value)
                new_fields[name] = (field,value)

        # check all fields that should be in the table but are not passed
        to_compute = []
        for ofield in self:
            name = ofield.name
            if not name in new_fields:
                # if field is supposed to be computed, compute it!
                if ofield.compute: # save those to compute for later
                    to_compute.append((name,ofield))
                # if field is required, check its default value
                elif not update and not ofield.default is None:
                    value = ofield.default
                    fields[name] = value
                    new_fields[name] = (ofield,value)
                # if this is an update, use the update value instead
                elif update and not ofield.update is None:
                    value = ofield.update
                    fields[name] = value
                    new_fields[name] = (ofield,value)
                # if the field is still not there but it should, error
                elif not update and ofield.required:
                    raise RuntimeError(
                        'Table: missing required field: %s' % name)
        # now deal with fields that are supposed to be computed
        if to_compute:
            row = Row(fields)
            for name,ofield in to_compute:
                # try compute it
                try:
                    row[name] = new_value = ofield.compute(row)
                    new_fields[name] = (ofield, new_value)
                except (KeyError, AttributeError):
                    # error silently unless field is required!
                    if ofield.required:
                        raise SyntaxError('unable to compute field: %s' % name)
        return new_fields.values()
8694
    def _attempt_upload(self, fields):
        # Persist pending file uploads: for every 'upload' field whose
        # value is a file-like object, store it via field.store() and
        # replace the value with the generated file name.
        for field in self:
            if field.type=='upload' and field.name in fields:
                value = fields[field.name]
                if value is not None and not isinstance(value,str):
                    if hasattr(value,'file') and hasattr(value,'filename'):
                        # cgi.FieldStorage-like object
                        new_name = field.store(value.file,filename=value.filename)
                    elif hasattr(value,'read') and hasattr(value,'name'):
                        # plain open file object
                        new_name = field.store(value,filename=value.name)
                    else:
                        raise RuntimeError("Unable to handle upload")
                    fields[field.name] = new_name
8707
8708 - def _defaults(self, fields):
8709 "If there are no fields/values specified, return table defaults" 8710 if not fields: 8711 fields = {} 8712 for field in self: 8713 if field.type != "id": 8714 fields[field.name] = field.default 8715 return fields
8716
    def _insert(self, **fields):
        # return the INSERT SQL for the given values without executing it
        fields = self._defaults(fields)
        return self._db._adapter._insert(self, self._listify(fields))
8720
    def insert(self, **fields):
        # Insert a record, honouring defaults, upload handling and the
        # _before_insert/_after_insert callback lists; returns the new id
        # (or 0 when a _before_insert callback vetoes the insert).
        fields = self._defaults(fields)
        self._attempt_upload(fields)
        # a truthy return from any _before_insert callback aborts the insert
        if any(f(fields) for f in self._before_insert): return 0
        ret = self._db._adapter.insert(self, self._listify(fields))
        if ret and self._after_insert:
            fields = Row(fields)
            [f(fields,ret) for f in self._after_insert]
        return ret
8730
    def validate_and_insert(self,**fields):
        # Validate each value with its field's requires and insert only
        # when there are no errors.  Returns a Row with .id (new record
        # id or None) and .errors (Row of per-field error messages).
        response = Row()
        response.errors = Row()
        new_fields = copy.copy(fields)
        for key,value in fields.iteritems():
            value,error = self[key].validate(value)
            if error:
                response.errors[key] = "%s" % error
            else:
                # keep the validator-transformed value
                new_fields[key] = value
        if not response.errors:
            response.id = self.insert(**new_fields)
        else:
            response.id = None
        return response
8746
8747 - def validate_and_update(self, _key=DEFAULT, **fields):
8748 response = Row() 8749 response.errors = Row() 8750 new_fields = copy.copy(fields) 8751 8752 for key,value in fields.iteritems(): 8753 value,error = self[key].validate(value) 8754 if error: 8755 response.errors[key] = "%s" % error 8756 else: 8757 new_fields[key] = value 8758 8759 if _key is DEFAULT: 8760 record = self(**values) 8761 elif isinstance(_key,dict): 8762 record = self(**_key) 8763 else: 8764 record = self(_key) 8765 8766 if not response.errors and record: 8767 row = self._db(self._id==_key) 8768 response.id = row.update(**fields) 8769 else: 8770 response.id = None 8771 return response
8772
8773 - def update_or_insert(self, _key=DEFAULT, **values):
8774 if _key is DEFAULT: 8775 record = self(**values) 8776 elif isinstance(_key,dict): 8777 record = self(**_key) 8778 else: 8779 record = self(_key) 8780 if record: 8781 record.update_record(**values) 8782 newid = None 8783 else: 8784 newid = self.insert(**values) 8785 return newid
8786
    def bulk_insert(self, items):
        """
        here items is a list of dictionaries
        """
        # listify every row first so defaults/computed fields are applied
        items = [self._listify(item) for item in items]
        # a truthy _before_insert callback on any row vetoes the whole batch
        if any(f(item) for item in items for f in self._before_insert):return 0
        ret = self._db._adapter.bulk_insert(self,items)
        # fire _after_insert per inserted row (ret holds the new ids)
        ret and [[f(item,ret[k]) for k,item in enumerate(items)] for f in self._after_insert]
        return ret
8796
    def _truncate(self, mode = None):
        # return the TRUNCATE SQL without executing it
        return self._db._adapter._truncate(self, mode)
8799
    def truncate(self, mode = None):
        # remove all records from the table (mode is adapter-specific)
        return self._db._adapter.truncate(self, mode)
8802
    def import_from_csv_file(
        self,
        csvfile,
        id_map=None,
        null='<NULL>',
        unique='uuid',
        id_offset=None, # id_offset used only when id_map is None
        *args, **kwargs
        ):
        """
        Import records from csv file.
        Column headers must have same names as table fields.
        Field 'id' is ignored.
        If column names read 'table.file' the 'table.' prefix is ignored.
        'unique' argument is a field which must be unique
        (typically a uuid field)
        'restore' argument is default False;
        if set True will remove old values in table first.
        'id_map' if set to None will not map ids.
        The import will keep the id numbers in the restored table.
        This assumes that there is an field of type id that
        is integer and in incrementing order.
        Will keep the id numbers in restored table.
        """

        delimiter = kwargs.get('delimiter', ',')
        quotechar = kwargs.get('quotechar', '"')
        quoting = kwargs.get('quoting', csv.QUOTE_MINIMAL)
        restore = kwargs.get('restore', False)
        if restore:
            self._db[self].truncate()

        reader = csv.reader(csvfile, delimiter=delimiter,
                            quotechar=quotechar, quoting=quoting)
        colnames = None
        if isinstance(id_map, dict):
            if not self._tablename in id_map:
                id_map[self._tablename] = {}
            id_map_self = id_map[self._tablename]

        def fix(field, value, id_map, id_offset):
            # convert one csv cell into the python value for `field`
            list_reference_s='list:reference'
            if value == null:
                value = None
            elif field.type=='blob':
                value = base64.b64decode(value)
            elif field.type=='double' or field.type=='float':
                if not value.strip():
                    value = None
                else:
                    value = float(value)
            elif field.type in ('integer','bigint'):
                if not value.strip():
                    value = None
                else:
                    value = long(value)
            elif field.type.startswith('list:string'):
                value = bar_decode_string(value)
            elif field.type.startswith(list_reference_s):
                ref_table = field.type[len(list_reference_s):].strip()
                if id_map is not None:
                    # remap each referenced id through the id_map
                    value = [id_map[ref_table][long(v)] \
                             for v in bar_decode_string(value)]
                else:
                    value = [v for v in bar_decode_string(value)]
            elif field.type.startswith('list:'):
                value = bar_decode_integer(value)
            elif id_map and field.type.startswith('reference'):
                try:
                    value = id_map[field.type[9:].strip()][long(value)]
                except KeyError:
                    pass
            elif id_offset and field.type.startswith('reference'):
                try:
                    value = id_offset[field.type[9:].strip()]+long(value)
                except KeyError:
                    pass
            return (field.name, value)

        def is_id(colname):
            # true when the column maps to this table's id-type field
            if colname in self:
                return self[colname].type == 'id'
            else:
                return False

        first = True
        unique_idx = None
        for lineno, line in enumerate(reader):
            if not line:
                break
            if not colnames:
                # assume this is the first line of the input, contains colnames
                colnames = [x.split('.',1)[-1] for x in line][:len(line)]
                cols, cid = [], None
                for i,colname in enumerate(colnames):
                    if is_id(colname):
                        cid = i
                    elif colname in self.fields:
                        cols.append((i,self[colname]))
                    if colname == unique:
                        unique_idx = i
            else:
                # every other line contains instead data
                items = []
                for i, field in cols:
                    try:
                        items.append(fix(field, line[i], id_map, id_offset))
                    except ValueError:
                        raise RuntimeError("Unable to parse line:%s field:%s value:'%s'"
                                           % (lineno+1,field,line[i]))

                if not (id_map or cid is None or id_offset is None or unique_idx):
                    csv_id = long(line[cid])
                    curr_id = self.insert(**dict(items))
                    if first:
                        first = False
                        # First curr_id is bigger than csv_id,
                        # then we are not restoring but
                        # extending db table with csv db table
                        id_offset[self._tablename] = (curr_id-csv_id) \
                            if curr_id>csv_id else 0
                    # create new id until we get the same as old_id+offset
                    while curr_id<csv_id+id_offset[self._tablename]:
                        self._db(self._db[self][colnames[cid]] == curr_id).delete()
                        curr_id = self.insert(**dict(items))
                # Validation. Check for duplicate of 'unique' &,
                # if present, update instead of insert.
                elif not unique_idx:
                    new_id = self.insert(**dict(items))
                else:
                    unique_value = line[unique_idx]
                    query = self._db[self][unique] == unique_value
                    record = self._db(query).select().first()
                    if record:
                        record.update_record(**dict(items))
                        new_id = record[self._id.name]
                    else:
                        new_id = self.insert(**dict(items))
                if id_map and cid is not None:
                    id_map_self[long(line[cid])] = new_id
8944 - def as_dict(self, flat=False, sanitize=True):
8945 table_as_dict = dict(tablename=str(self), fields=[], 8946 sequence_name=self._sequence_name, 8947 trigger_name=self._trigger_name, 8948 common_filter=self._common_filter, format=self._format, 8949 singular=self._singular, plural=self._plural) 8950 8951 for field in self: 8952 if (field.readable or field.writable) or (not sanitize): 8953 table_as_dict["fields"].append(field.as_dict( 8954 flat=flat, sanitize=sanitize)) 8955 return table_as_dict
8956
8957 - def as_xml(self, sanitize=True):
8958 if not have_serializers: 8959 raise ImportError("No xml serializers available") 8960 d = self.as_dict(flat=True, sanitize=sanitize) 8961 return serializers.xml(d)
8962
8963 - def as_json(self, sanitize=True):
8964 if not have_serializers: 8965 raise ImportError("No json serializers available") 8966 d = self.as_dict(flat=True, sanitize=sanitize) 8967 return serializers.json(d)
8968
8969 - def as_yaml(self, sanitize=True):
8970 if not have_serializers: 8971 raise ImportError("No YAML serializers available") 8972 d = self.as_dict(flat=True, sanitize=sanitize) 8973 return serializers.yaml(d)
8974
    def with_alias(self, alias):
        # return an aliased copy of this table (used for self-joins)
        return self._db._adapter.alias(self,alias)
8977
    def on(self, query):
        # join condition: db(...).select(..., left=table.on(query))
        return Expression(self._db,self._db._adapter.ON,self,query)
8980
def archive_record(qset,fs,archive_table,current_record):
    """
    _before_update callback installed by Table._enable_record_versioning:
    copy every row that is about to be modified into ``archive_table``,
    recording the original row id in the ``current_record`` column.

    Returns False so the pending update is never vetoed.

    :raises RuntimeError: when the update set spans more than one table.
    """
    tablenames = qset.db._adapter.tables(qset.query)
    if len(tablenames)!=1: raise RuntimeError("cannot update join")
    for row in qset.select():
        fields = archive_table._filter_fields(row)
        # assumes the versioned table has a conventional 'id' field
        fields[current_record] = row.id
        archive_table.insert(**fields)
    return False
8990
class Expression(object):
    """
    Abstract node of a SQL expression tree.

    Instances are built by operator overloading on Fields/Expressions
    and are later expanded into backend-specific SQL by the adapter;
    comparison operators return Query objects instead of Expressions.
    """

    def __init__(
        self,
        db,
        op,
        first=None,
        second=None,
        type=None,
        **optional_args
        ):

        self.db = db
        self.op = op
        self.first = first
        self.second = second
        self._table = getattr(first,'_table',None)
        ### self._tablename = first._tablename ## CHECK
        # inherit the SQL type of the first operand unless given explicitly
        if not type and first and hasattr(first,'type'):
            self.type = first.type
        else:
            self.type = type
        self.optional_args = optional_args

    # --- aggregate / scalar SQL functions ---

    def sum(self):
        db = self.db
        return Expression(db, db._adapter.AGGREGATE, self, 'SUM', self.type)

    def max(self):
        db = self.db
        return Expression(db, db._adapter.AGGREGATE, self, 'MAX', self.type)

    def min(self):
        db = self.db
        return Expression(db, db._adapter.AGGREGATE, self, 'MIN', self.type)

    def len(self):
        db = self.db
        return Expression(db, db._adapter.LENGTH, self, None, 'integer')

    def avg(self):
        db = self.db
        return Expression(db, db._adapter.AGGREGATE, self, 'AVG', self.type)

    def abs(self):
        db = self.db
        return Expression(db, db._adapter.AGGREGATE, self, 'ABS', self.type)

    def lower(self):
        db = self.db
        return Expression(db, db._adapter.LOWER, self, None, self.type)

    def upper(self):
        db = self.db
        return Expression(db, db._adapter.UPPER, self, None, self.type)

    def replace(self,a,b):
        db = self.db
        return Expression(db, db._adapter.REPLACE, self, (a,b), self.type)

    # --- datetime part extraction ---

    def year(self):
        db = self.db
        return Expression(db, db._adapter.EXTRACT, self, 'year', 'integer')

    def month(self):
        db = self.db
        return Expression(db, db._adapter.EXTRACT, self, 'month', 'integer')

    def day(self):
        db = self.db
        return Expression(db, db._adapter.EXTRACT, self, 'day', 'integer')

    def hour(self):
        db = self.db
        return Expression(db, db._adapter.EXTRACT, self, 'hour', 'integer')

    def minutes(self):
        db = self.db
        return Expression(db, db._adapter.EXTRACT, self, 'minute', 'integer')

    def coalesce(self,*others):
        db = self.db
        return Expression(db, db._adapter.COALESCE, self, others, self.type)

    def coalesce_zero(self):
        db = self.db
        return Expression(db, db._adapter.COALESCE_ZERO, self, None, self.type)

    def seconds(self):
        db = self.db
        return Expression(db, db._adapter.EXTRACT, self, 'second', 'integer')

    def epoch(self):
        db = self.db
        return Expression(db, db._adapter.EPOCH, self, None, 'integer')

    def __getslice__(self, start, stop):
        # SQL SUBSTRING with python-style (possibly negative) bounds
        db = self.db
        if start < 0:
            pos0 = '(%s - %d)' % (self.len(), abs(start) - 1)
        else:
            pos0 = start + 1

        if stop < 0:
            length = '(%s - %d - %s)' % (self.len(), abs(stop) - 1, pos0)
        elif stop == sys.maxint:
            length = self.len()
        else:
            length = '(%s - %s)' % (stop + 1, pos0)
        return Expression(db,db._adapter.SUBSTRING,
                          self, (pos0, length), self.type)

    def __getitem__(self, i):
        # single character == slice of length one
        return self[i:i + 1]

    def __str__(self):
        return self.db._adapter.expand(self,self.type)

    def __or__(self, other): # for use in sortby
        db = self.db
        return Expression(db,db._adapter.COMMA,self,other,self.type)

    def __invert__(self):
        db = self.db
        # collapse double negation: ~~x is x.
        # bug fix: this used to test hasattr(self,'_op') but the
        # attribute is named 'op', so the short-circuit was dead code
        if self.op == db._adapter.INVERT:
            return self.first
        return Expression(db,db._adapter.INVERT,self,type=self.type)

    def __add__(self, other):
        db = self.db
        return Expression(db,db._adapter.ADD,self,other,self.type)

    def __sub__(self, other):
        db = self.db
        # subtraction result type depends on the operand type
        if self.type in ('integer','bigint'):
            result_type = 'integer'
        elif self.type in ['date','time','datetime','double','float']:
            result_type = 'double'
        elif self.type.startswith('decimal('):
            result_type = self.type
        else:
            raise SyntaxError("subtraction operation not supported for type")
        return Expression(db,db._adapter.SUB,self,other,result_type)

    def __mul__(self, other):
        db = self.db
        return Expression(db,db._adapter.MUL,self,other,self.type)

    def __div__(self, other):
        db = self.db
        return Expression(db,db._adapter.DIV,self,other,self.type)

    def __mod__(self, other):
        db = self.db
        return Expression(db,db._adapter.MOD,self,other,self.type)

    # --- comparisons (these return Query objects) ---

    def __eq__(self, value):
        db = self.db
        return Query(db, db._adapter.EQ, self, value)

    def __ne__(self, value):
        db = self.db
        return Query(db, db._adapter.NE, self, value)

    def __lt__(self, value):
        db = self.db
        return Query(db, db._adapter.LT, self, value)

    def __le__(self, value):
        db = self.db
        return Query(db, db._adapter.LE, self, value)

    def __gt__(self, value):
        db = self.db
        return Query(db, db._adapter.GT, self, value)

    def __ge__(self, value):
        db = self.db
        return Query(db, db._adapter.GE, self, value)

    def like(self, value, case_sensitive=False):
        db = self.db
        op = case_sensitive and db._adapter.LIKE or db._adapter.ILIKE
        return Query(db, op, self, value)

    def regexp(self, value):
        db = self.db
        return Query(db, db._adapter.REGEXP, self, value)

    def belongs(self, *value, **kwattr):
        """
        Accepts the following inputs:
           field.belongs(1,2)
           field.belongs((1,2))
           field.belongs(query)

        Does NOT accept:
           field.belongs(1)
        """
        db = self.db
        if len(value) == 1:
            value = value[0]
        if isinstance(value,Query):
            # nested select on the referenced table's id
            value = db(value)._select(value.first._table._id)
        elif not isinstance(value, basestring):
            value = set(value)
            if kwattr.get('null') and None in value:
                # treat None specially: match NULL OR set membership
                value.remove(None)
                return (self == None) | Query(db, db._adapter.BELONGS, self, value)
        return Query(db, db._adapter.BELONGS, self, value)

    def startswith(self, value):
        db = self.db
        if not self.type in ('string', 'text', 'json'):
            raise SyntaxError("startswith used with incompatible field type")
        return Query(db, db._adapter.STARTSWITH, self, value)

    def endswith(self, value):
        db = self.db
        if not self.type in ('string', 'text', 'json'):
            raise SyntaxError("endswith used with incompatible field type")
        return Query(db, db._adapter.ENDSWITH, self, value)

    def contains(self, value, all=False, case_sensitive=False):
        """
        The case_sensitive parameters is only useful for PostgreSQL
        For other RDMBs it is ignored and contains is always case in-sensitive
        For MongoDB and GAE contains is always case sensitive
        """
        db = self.db
        if isinstance(value,(list, tuple)):
            # recurse per item and combine with AND (all=True) or OR
            subqueries = [self.contains(str(v).strip(),case_sensitive=case_sensitive)
                          for v in value if str(v).strip()]
            if not subqueries:
                return self.contains('')
            else:
                return reduce(all and AND or OR,subqueries)
        if not self.type in ('string', 'text', 'json') and not self.type.startswith('list:'):
            raise SyntaxError("contains used with incompatible field type")
        return Query(db, db._adapter.CONTAINS, self, value, case_sensitive=case_sensitive)

    def with_alias(self, alias):
        db = self.db
        return Expression(db, db._adapter.AS, self, alias, self.type)

    # --- GIS expressions ---

    def st_asgeojson(self, precision=15, options=0, version=1):
        return Expression(self.db, self.db._adapter.ST_ASGEOJSON, self,
                          dict(precision=precision, options=options,
                               version=version), 'string')

    def st_astext(self):
        db = self.db
        return Expression(db, db._adapter.ST_ASTEXT, self, type='string')

    def st_x(self):
        db = self.db
        return Expression(db, db._adapter.ST_X, self, type='string')

    def st_y(self):
        db = self.db
        return Expression(db, db._adapter.ST_Y, self, type='string')

    def st_distance(self, other):
        db = self.db
        return Expression(db,db._adapter.ST_DISTANCE,self,other, 'double')

    def st_simplify(self, value):
        db = self.db
        return Expression(db, db._adapter.ST_SIMPLIFY, self, value, self.type)

    # --- GIS queries ---

    def st_contains(self, value):
        db = self.db
        return Query(db, db._adapter.ST_CONTAINS, self, value)

    def st_equals(self, value):
        db = self.db
        return Query(db, db._adapter.ST_EQUALS, self, value)

    def st_intersects(self, value):
        db = self.db
        return Query(db, db._adapter.ST_INTERSECTS, self, value)

    def st_overlaps(self, value):
        db = self.db
        return Query(db, db._adapter.ST_OVERLAPS, self, value)

    def st_touches(self, value):
        db = self.db
        return Query(db, db._adapter.ST_TOUCHES, self, value)

    def st_within(self, value):
        db = self.db
        return Query(db, db._adapter.ST_WITHIN, self, value)
# for use in both Query and sortby

class SQLCustomType(object):
    """
    allows defining of custom SQL types

    Example::

        decimal = SQLCustomType(
            type ='double',
            native ='integer',
            encoder =(lambda x: int(float(x) * 100)),
            decoder = (lambda x: Decimal("0.00") + Decimal(str(float(x)/100)) )
            )

        db.define_table(
            'example',
            Field('value', type=decimal)
            )

    :param type: the web2py type (default = 'string')
    :param native: the backend type
    :param encoder: how to encode the value to store it in the backend
    :param decoder: how to decode the value retrieved from the backend
    :param validator: what validators to use ( default = None, will use the
        default validator for type)
    """

    def __init__(
        self,
        type='string',
        native=None,
        encoder=None,
        decoder=None,
        validator=None,
        _class=None,
        ):

        self.type = type
        self.native = native
        self.encoder = encoder or (lambda x: x)
        self.decoder = decoder or (lambda x: x)
        self.validator = validator
        self._class = _class or type

    def startswith(self, text=None):
        # delegate to the underlying web2py type string.
        # bug fix: this used to call self.type.startswith(self, text),
        # passing the instance as the prefix, which always raised
        # TypeError and therefore always returned False
        try:
            return self.type.startswith(text)
        except TypeError:
            return False

    def __getslice__(self, a=0, b=100):
        return None

    def __getitem__(self, i):
        return None

    def __str__(self):
        return self._class
class FieldVirtual(object):
    """A computed (virtual) field: its value is produced by f(row)."""

    def __init__(self, name, f=None, ftype='string', label=None,
                 table_name=None):
        # for backward compatibility: FieldVirtual(f) with no explicit name
        if f:
            self.name, self.f = name, f
        else:
            self.name, self.f = 'unknown', name
        self.type = ftype
        self.label = label or self.name.capitalize().replace('_', ' ')
        self.represent = lambda v, r: v
        self.formatter = IDENTITY
        self.comment = None
        self.readable = True
        self.writable = False  # virtual fields are read-only
        self.requires = None
        self.widget = None
        self.tablename = table_name
        self.filter_out = None

    def __str__(self):
        return '%s.%s' % (self.tablename, self.name)
9369
class FieldMethod(object):
    """Holder for a lazy table method (Field.Method): f is invoked on
    demand, optionally through a custom handler."""

    def __init__(self, name, f=None, handler=None):
        # for backward compatibility: FieldMethod(f) with no explicit name
        if f:
            self.name, self.f = name, f
        else:
            self.name, self.f = 'unknown', name
        self.handler = handler
9375
def list_represent(x, r=None):
    """Default represent for list: fields: items joined by ', '.
    `r` (the row) is accepted for the represent(value, row) signature."""
    items = x or []
    return ', '.join(str(item) for item in items)
9378
class Field(Expression):

    """
    an instance of this class represents a database field

    example::

        a = Field(name, 'string', length=32, default=None, required=False,
            requires=IS_NOT_EMPTY(), ondelete='CASCADE',
            notnull=False, unique=False,
            widget=None, label=None, comment=None,
            uploadfield=True,     # True means store on disk,
                                  # 'a_field_name' means store in this
                                  # field in db, False means file content
                                  # will be discarded.
            writable=True, readable=True, update=None, authorize=None,
            autodelete=False, represent=None, uploadfolder=None,
            uploadseparate=False, # upload to separate directories by
                                  # uuid_key's first 2 characters and
                                  # tablename.fieldname
                                  # False - old behavior
                                  # True - put uploaded file in
                                  # <uploaddir>/<tablename>.<fieldname>/uuid_key[:2]
                                  # directory
            uploadfs=None)        # a pyfilesystem where to store upload

    to be used as argument of DAL.define_table

    allowed field types:
    string, boolean, integer, double, text, blob,
    date, time, datetime, upload, password
    """
    # NOTE: the docstring now precedes the class attributes; previously it
    # followed them, so it was a discarded string and __doc__ was never set.

    Virtual = FieldVirtual
    Method = FieldMethod
    Lazy = FieldMethod  # for backward compatibility

    def __init__(
        self,
        fieldname,
        type='string',
        length=None,
        default=DEFAULT,
        required=False,
        requires=DEFAULT,
        ondelete='CASCADE',
        notnull=False,
        unique=False,
        uploadfield=True,
        widget=None,
        label=None,
        comment=None,
        writable=True,
        readable=True,
        update=None,
        authorize=None,
        autodelete=False,
        represent=None,
        uploadfolder=None,
        uploadseparate=False,
        uploadfs=None,
        compute=None,
        custom_store=None,
        custom_retrieve=None,
        custom_retrieve_file_properties=None,
        custom_delete=None,
        filter_in = None,
        filter_out = None,
        custom_qualifier = None,
        map_none = None,
        ):
        self._db = self.db = None  # both for backward compatibility
        self.op = None
        self.first = None
        self.second = None
        # accept unicode field names as long as they encode to plain str
        if isinstance(fieldname, unicode):
            try:
                fieldname = str(fieldname)
            except UnicodeEncodeError:
                raise SyntaxError('Field: invalid unicode field name')
        self.name = fieldname = cleanup(fieldname)
        # reject names that would shadow Table attributes, look private,
        # or are python keywords
        if not isinstance(fieldname, str) or hasattr(Table, fieldname) or \
                fieldname[0] == '_' or REGEX_PYTHON_KEYWORDS.match(fieldname):
            raise SyntaxError('Field: invalid field name: %s' % fieldname)
        # a Table or Field passed as type means "reference <tablename>"
        self.type = type if not isinstance(type, (Table, Field)) \
            else 'reference %s' % type
        self.length = length if not length is None \
            else DEFAULTLENGTH.get(self.type, 512)
        # with no explicit default, fall back to the update value (if any)
        self.default = default if default != DEFAULT else (update or None)
        self.required = required  # is this field required
        self.ondelete = ondelete.upper()  # this is for reference fields only
        self.notnull = notnull
        self.unique = unique
        self.uploadfield = uploadfield
        self.uploadfolder = uploadfolder
        self.uploadseparate = uploadseparate
        self.uploadfs = uploadfs
        self.widget = widget
        self.comment = comment
        self.writable = writable
        self.readable = readable
        self.update = update
        self.authorize = authorize
        self.autodelete = autodelete
        # list: types get a comma-join representation by default
        self.represent = list_represent if \
            represent == None and type in ('list:integer', 'list:string') \
            else represent
        self.compute = compute
        self.isattachment = True
        self.custom_store = custom_store
        self.custom_retrieve = custom_retrieve
        self.custom_retrieve_file_properties = custom_retrieve_file_properties
        self.custom_delete = custom_delete
        self.filter_in = filter_in
        self.filter_out = filter_out
        self.custom_qualifier = custom_qualifier
        self.label = label if label != None \
            else fieldname.replace('_', ' ').title()
        self.requires = requires if requires != None else []
        self.map_none = map_none

    def set_attributes(self, *args, **attributes):
        """Bulk-update this field's attributes in place."""
        self.__dict__.update(*args, **attributes)

    def clone(self, point_self_references_to=False, **args):
        """Return a shallow copy of this field, optionally re-pointing a
        self-reference to another table and overriding attributes via args."""
        field = copy.copy(self)
        # BUGFIX: the old condition used string concatenation
        # ('reference %s'+field._tablename) instead of interpolation, so it
        # never matched and self-references were never re-pointed.
        if point_self_references_to and \
                field.type == 'reference %s' % field._tablename:
            field.type = 'reference %s' % point_self_references_to
        field.__dict__.update(args)
        return field

    def store(self, file, filename=None, path=None):
        """Store an uploaded file and return the safe generated filename.

        Depending on self.uploadfield the content goes to disk (True),
        to a blob Field (a Field instance) or to a custom store.
        The generated name encodes table, field, a uuid key and the
        b16-encoded original filename, truncated to self.length.
        """
        if self.custom_store:
            return self.custom_store(file, filename, path)
        if isinstance(file, cgi.FieldStorage):
            filename = filename or file.filename
            file = file.file
        elif not filename:
            filename = file.name
        # normalize path separators and keep only the base name
        filename = os.path.basename(filename.replace('/', os.sep)
                                    .replace('\\', os.sep))
        m = REGEX_STORE_PATTERN.search(filename)
        extension = m and m.group('e') or 'txt'
        uuid_key = web2py_uuid().replace('-', '')[-16:]
        encoded_filename = base64.b16encode(filename).lower()
        newfilename = '%s.%s.%s.%s' % \
            (self._tablename, self.name, uuid_key, encoded_filename)
        # truncate so the stored name (plus '.' + extension) fits the field
        newfilename = newfilename[:(self.length - 1 - len(extension))] + \
            '.' + extension
        self_uploadfield = self.uploadfield
        if isinstance(self_uploadfield, Field):
            # file content goes into a blob field of another table
            blob_uploadfield_name = self_uploadfield.uploadfield
            keys = {self_uploadfield.name: newfilename,
                    blob_uploadfield_name: file.read()}
            self_uploadfield.table.insert(**keys)
        elif self_uploadfield == True:
            # file content goes to disk (or to self.uploadfs)
            if path:
                pass
            elif self.uploadfolder:
                path = self.uploadfolder
            elif self.db._adapter.folder:
                path = pjoin(self.db._adapter.folder, '..', 'uploads')
            else:
                raise RuntimeError(
                    "you must specify a Field(...,uploadfolder=...)")
            if self.uploadseparate:
                if self.uploadfs:
                    raise RuntimeError("not supported")
                path = pjoin(path, "%s.%s" % (self._tablename, self.name),
                             uuid_key[:2])
            if not exists(path):
                os.makedirs(path)
            pathfilename = pjoin(path, newfilename)
            if self.uploadfs:
                dest_file = self.uploadfs.open(newfilename, 'wb')
            else:
                dest_file = open(pathfilename, 'wb')
            try:
                shutil.copyfileobj(file, dest_file)
            except IOError:
                raise IOError(
                    'Unable to store file "%s" because invalid permissions, readonly file system, or filename too long' % pathfilename)
            dest_file.close()
        return newfilename

    def retrieve(self, name, path=None, nameonly=False):
        """
        if nameonly==True return (filename, fullfilename) instead of
        (filename, stream)
        """
        self_uploadfield = self.uploadfield
        if self.custom_retrieve:
            return self.custom_retrieve(name, path)
        import http
        # authorization (and in-db storage) require fetching the row first
        if self.authorize or isinstance(self_uploadfield, str):
            row = self.db(self == name).select().first()
            if not row:
                raise http.HTTP(404)
        if self.authorize and not self.authorize(row):
            raise http.HTTP(403)
        file_properties = self.retrieve_file_properties(name, path)
        filename = file_properties['filename']
        if isinstance(self_uploadfield, str):  # ## if file is in DB
            stream = StringIO.StringIO(row[self_uploadfield] or '')
        elif isinstance(self_uploadfield, Field):
            # content lives in a blob field of another table
            blob_uploadfield_name = self_uploadfield.uploadfield
            query = self_uploadfield == name
            data = self_uploadfield.table(query)[blob_uploadfield_name]
            stream = StringIO.StringIO(data)
        elif self.uploadfs:
            # ## if file is on pyfilesystem
            stream = self.uploadfs.open(name, 'rb')
        else:
            # ## if file is on regular filesystem
            # this is intentially a sting with filename and not a stream
            # this propagates and allows stream_file_or_304_or_206 to be called
            fullname = pjoin(file_properties['path'], name)
            if nameonly:
                return (filename, fullname)
            stream = open(fullname, 'rb')
        return (filename, stream)

    def retrieve_file_properties(self, name, path=None):
        """Decode a stored upload name into {'path':..., 'filename':...};
        path is None for in-db storage."""
        m = REGEX_UPLOAD_PATTERN.match(name)
        if not m or not self.isattachment:
            raise TypeError('Can\'t retrieve %s file properties' % name)
        self_uploadfield = self.uploadfield
        if self.custom_retrieve_file_properties:
            return self.custom_retrieve_file_properties(name, path)
        if m.group('name'):
            try:
                # original filename is b16-encoded inside the stored name
                filename = base64.b16decode(m.group('name'), True)
                filename = REGEX_CLEANUP_FN.sub('_', filename)
            except (TypeError, AttributeError):
                filename = name
        else:
            filename = name
        # ## if file is in DB
        if isinstance(self_uploadfield, (str, Field)):
            return dict(path=None, filename=filename)
        # ## if file is on filesystem
        if not path:
            if self.uploadfolder:
                path = self.uploadfolder
            else:
                path = pjoin(self.db._adapter.folder, '..', 'uploads')
        if self.uploadseparate:
            t = m.group('table')
            f = m.group('field')
            u = m.group('uuidkey')
            path = pjoin(path, "%s.%s" % (t, f), u[:2])
        return dict(path=path, filename=filename)

    def formatter(self, value):
        """Run the validators' formatters, in reverse order, on value
        (e.g. converting a python value back to its form representation)."""
        requires = self.requires
        if value is None or not requires:
            return value or self.map_none
        if not isinstance(requires, (list, tuple)):
            requires = [requires]
        elif isinstance(requires, tuple):
            requires = list(requires)
        else:
            # copy so reverse() does not mutate self.requires
            requires = copy.copy(requires)
        requires.reverse()
        for item in requires:
            if hasattr(item, 'formatter'):
                value = item.formatter(value)
        return value

    def validate(self, value):
        """Apply the field validators to value.

        Returns (value, None) on success or (value, error) on the first
        failing validator; map_none values are normalized to None.
        """
        if not self.requires or self.requires == DEFAULT:
            return ((value if value != self.map_none else None), None)
        requires = self.requires
        if not isinstance(requires, (list, tuple)):
            requires = [requires]
        for validator in requires:
            (value, error) = validator(value)
            if error:
                return (value, error)
        return ((value if value != self.map_none else None), None)

    def count(self, distinct=None):
        """COUNT(field) expression, optionally DISTINCT."""
        return Expression(self.db, self.db._adapter.COUNT, self,
                          distinct, 'integer')

    def as_dict(self, flat=False, sanitize=True):
        """Serialize this field definition as a dict.

        With flat=True only plain serializable values survive (dates become
        strings, everything else becomes None). When sanitize is True and
        the field is neither readable nor writable, an empty dict (with only
        'fieldname') is returned.
        """
        attrs = ("name", 'authorize', 'represent', 'ondelete',
                 'custom_store', 'autodelete', 'custom_retrieve',
                 'filter_out', 'uploadseparate', 'widget', 'uploadfs',
                 'update', 'custom_delete', 'uploadfield', 'uploadfolder',
                 'custom_qualifier', 'unique', 'writable', 'compute',
                 'map_none', 'default', 'type', 'required', 'readable',
                 'requires', 'comment', 'label', 'length', 'notnull',
                 'custom_retrieve_file_properties', 'filter_in')
        serializable = (int, long, basestring, float, tuple,
                        bool, type(None))

        def flatten(obj):
            # recursively reduce obj to json-friendly primitives
            if isinstance(obj, dict):
                return dict((flatten(k), flatten(v)) for k, v in
                            obj.items())
            elif isinstance(obj, (tuple, list, set)):
                return [flatten(v) for v in obj]
            elif isinstance(obj, serializable):
                return obj
            elif isinstance(obj, (datetime.datetime,
                                  datetime.date, datetime.time)):
                return str(obj)
            else:
                return None

        d = dict()
        if not (sanitize and not (self.readable or self.writable)):
            for attr in attrs:
                if flat:
                    d.update({attr: flatten(getattr(self, attr))})
                else:
                    d.update({attr: getattr(self, attr)})
        d["fieldname"] = d.pop("name")
        return d

    def as_xml(self, sanitize=True):
        """XML serialization of as_dict(flat=True)."""
        if have_serializers:
            xml = serializers.xml
        else:
            raise ImportError("No xml serializers available")
        d = self.as_dict(flat=True, sanitize=sanitize)
        return xml(d)

    def as_json(self, sanitize=True):
        """JSON serialization of as_dict(flat=True)."""
        if have_serializers:
            json = serializers.json
        else:
            raise ImportError("No json serializers available")
        d = self.as_dict(flat=True, sanitize=sanitize)
        return json(d)

    def as_yaml(self, sanitize=True):
        """YAML serialization of as_dict(flat=True)."""
        if have_serializers:
            d = self.as_dict(flat=True, sanitize=sanitize)
            return serializers.yaml(d)
        else:
            raise ImportError("No YAML serializers available")

    def __nonzero__(self):
        # a Field is always truthy (Expression may define arithmetic ops)
        return True

    def __str__(self):
        try:
            return '%s.%s' % (self.tablename, self.name)
        except Exception:
            # tablename is unavailable before the field is bound to a table
            return '<no table>.%s' % self.name
9726
class Query(object):

    """
    a query object necessary to define a set.
    it can be stored or can be passed to DAL.__call__() to obtain a Set

    Example::

        query = db.users.name=='Max'
        set = db(query)
        records = set.select()

    """

    def __init__(
        self,
        db,
        op,
        first=None,
        second=None,
        ignore_common_filters = False,
        **optional_args
        ):
        # db: the DAL instance; op: the adapter operation; first/second:
        # the operands (Field/Expression/constant or a nested Query)
        self.db = self._db = db
        self.op = op
        self.first = first
        self.second = second
        self.ignore_common_filters = ignore_common_filters
        self.optional_args = optional_args

    def __repr__(self):
        # expand through BaseAdapter explicitly so repr works uniformly
        # across adapters
        return '<Query %s>' % BaseAdapter.expand(self.db._adapter,self)

    def __str__(self):
        # the backend-specific (SQL) text of this query
        return self.db._adapter.expand(self)

    def __and__(self, other):
        # (q1 & q2) -> AND query
        return Query(self.db,self.db._adapter.AND,self,other)

    __rand__ = __and__

    def __or__(self, other):
        # (q1 | q2) -> OR query
        return Query(self.db,self.db._adapter.OR,self,other)

    __ror__ = __or__

    def __invert__(self):
        # ~q -> NOT query; a double negation unwraps to the inner query
        if self.op==self.db._adapter.NOT:
            return self.first
        return Query(self.db,self.db._adapter.NOT,self)

    def __eq__(self, other):
        # structural equality via the expanded representation
        return repr(self) == repr(other)

    def __ne__(self, other):
        return not (self == other)

    def case(self,t=1,f=0):
        # SQL CASE WHEN <self> THEN t ELSE f END, as an expression
        return self.db._adapter.CASE(self,t,f)

    def as_dict(self, flat=False, sanitize=True):
        """Experimental stuff

        This allows to return a plain dictionary with the basic
        query representation. Can be used with json/xml services
        for client-side db I/O

        Example:
        >>> q = db.auth_user.id != 0
        >>> q.as_dict(flat=True)
        {"op": "NE", "first":{"tablename": "auth_user",
                              "fieldname": "id"},
                     "second":0}
        """

        SERIALIZABLE_TYPES = (tuple, dict, set, list, int, long, float,
                              basestring, type(None), bool)

        def loop(d):
            # recursively turn Query/Expression/Field operands into plain
            # serializable structures; non-serializable values are dropped
            newd = dict()
            for k, v in d.items():
                if k in ("first", "second"):
                    if isinstance(v, self.__class__):
                        newd[k] = loop(v.__dict__)
                    elif isinstance(v, Field):
                        newd[k] = {"tablename": v._tablename,
                                   "fieldname": v.name}
                    elif isinstance(v, Expression):
                        newd[k] = loop(v.__dict__)
                    elif isinstance(v, SERIALIZABLE_TYPES):
                        newd[k] = v
                    elif isinstance(v, (datetime.date,
                                        datetime.time,
                                        datetime.datetime)):
                        newd[k] = unicode(v)
                elif k == "op":
                    if callable(v):
                        newd[k] = v.__name__
                    elif isinstance(v, basestring):
                        newd[k] = v
                    else: pass # not callable or string
                elif isinstance(v, SERIALIZABLE_TYPES):
                    if isinstance(v, dict):
                        newd[k] = loop(v)
                    else: newd[k] = v
            return newd

        if flat:
            return loop(self.__dict__)
        else: return self.__dict__

    def as_xml(self, sanitize=True):
        # XML serialization of as_dict(flat=True)
        if have_serializers:
            xml = serializers.xml
        else:
            raise ImportError("No xml serializers available")
        d = self.as_dict(flat=True, sanitize=sanitize)
        return xml(d)

    def as_json(self, sanitize=True):
        # JSON serialization of as_dict(flat=True)
        if have_serializers:
            json = serializers.json
        else:
            raise ImportError("No json serializers available")
        d = self.as_dict(flat=True, sanitize=sanitize)
        return json(d)
9854
def xorify(orderby):
    """OR-combine a sequence of expressions into one (left to right).
    Returns None for an empty/missing sequence."""
    if not orderby:
        return None
    combined = orderby[0]
    for expression in orderby[1:]:
        combined = combined | expression
    return combined
9862
def use_common_filters(query):
    """True when query exists, supports common filters and does not
    suppress them; falsy queries are returned as-is."""
    if not query:
        return query
    if not hasattr(query, 'ignore_common_filters'):
        return False
    return not query.ignore_common_filters
9866
class Set(object):

    """
    a Set represents a set of records in the database,
    the records are identified by the query=Query(...) object.
    normally the Set is generated by DAL.__call__(Query(...))

    given a set, for example
    set = db(db.users.name=='Max')
    you can:
    set.update(db.users.name='Massimo')
    set.delete() # all elements in the set
    set.select(orderby=db.users.id, groupby=db.users.name, limitby=(0,10))
    and take subsets:
    subset = set(db.users.id<5)
    """

    def __init__(self, db, query, ignore_common_filters = None):
        # db: the DAL instance; query: a Query, a dict (parsed into a
        # Query) or None
        self.db = db
        self._db = db # for backward compatibility
        self.dquery = None

        # if query is a dict, parse it
        if isinstance(query, dict):
            query = self.parse(query)

        # apply/clear the common-filters flag only when explicitly
        # requested and different from the query's current setting; copy
        # the query to avoid mutating a shared Query object
        if not ignore_common_filters is None and \
                use_common_filters(query) == ignore_common_filters:
            query = copy.copy(query)
            query.ignore_common_filters = ignore_common_filters
        self.query = query

    def __repr__(self):
        return '<Set %s>' % BaseAdapter.expand(self.db._adapter,self.query)

    def __call__(self, query, ignore_common_filters=False):
        # refine this Set with one more condition; accepts a Query, a
        # Table (-> id query), a raw SQL string or a Field (-> NOT NULL)
        if query is None:
            return self
        elif isinstance(query,Table):
            query = self.db._adapter.id_query(query)
        elif isinstance(query,str):
            query = Expression(self.db,query)
        elif isinstance(query,Field):
            query = query!=None
        if self.query:
            return Set(self.db, self.query & query,
                       ignore_common_filters=ignore_common_filters)
        else:
            return Set(self.db, query,
                       ignore_common_filters=ignore_common_filters)

    def _count(self,distinct=None):
        # SQL text of the COUNT query (not executed)
        return self.db._adapter._count(self.query,distinct)

    def _select(self, *fields, **attributes):
        # SQL text of the SELECT query (not executed)
        adapter = self.db._adapter
        tablenames = adapter.tables(self.query,
                                    attributes.get('join',None),
                                    attributes.get('left',None),
                                    attributes.get('orderby',None),
                                    attributes.get('groupby',None))
        fields = adapter.expand_all(fields, tablenames)
        return adapter._select(self.query,fields,attributes)

    def _delete(self):
        # SQL text of the DELETE query (not executed)
        db = self.db
        tablename = db._adapter.get_table(self.query)
        return db._adapter._delete(tablename,self.query)

    def _update(self, **update_fields):
        # SQL text of the UPDATE query (not executed)
        db = self.db
        tablename = db._adapter.get_table(self.query)
        fields = db[tablename]._listify(update_fields,update=True)
        return db._adapter._update(tablename,self.query,fields)

    def as_dict(self, flat=False, sanitize=True):
        # serializable representation; db identifiers are only exposed
        # when sanitize is False
        if flat:
            uid = dbname = uri = None
            codec = self.db._db_codec
            if not sanitize:
                # NOTE(review): uri receives db._dbname and dbname
                # receives str(db) — these look swapped; confirm intent
                uri, dbname, uid = (self.db._dbname, str(self.db),
                                    self.db._db_uid)
            d = {"query": self.query.as_dict(flat=flat)}
            d["db"] = {"uid": uid, "codec": codec,
                       "name": dbname, "uri": uri}
            return d
        else: return self.__dict__

    def as_xml(self, sanitize=True):
        # XML serialization of as_dict(flat=True)
        if have_serializers:
            xml = serializers.xml
        else:
            raise ImportError("No xml serializers available")
        d = self.as_dict(flat=True, sanitize=sanitize)
        return xml(d)

    def as_json(self, sanitize=True):
        # JSON serialization of as_dict(flat=True)
        if have_serializers:
            json = serializers.json
        else:
            raise ImportError("No json serializers available")
        d = self.as_dict(flat=True, sanitize=sanitize)
        return json(d)

    def parse(self, dquery):
        "Experimental: Turn a dictionary into a Query object"
        self.dquery = dquery
        return self.build(self.dquery)

    def build(self, d):
        "Experimental: see .parse()"
        op, first, second = (d["op"], d["first"],
                             d.get("second", None))
        left = right = built = None

        if op in ("AND", "OR"):
            # boolean combinators need two sub-query dicts
            if not (type(first), type(second)) == (dict, dict):
                raise SyntaxError("Invalid AND/OR query")
            if op == "AND":
                built = self.build(first) & self.build(second)
            else: built = self.build(first) | self.build(second)

        elif op == "NOT":
            if first is None:
                raise SyntaxError("Invalid NOT query")
            built = ~self.build(first)
        else:
            # normal operation (GT, EQ, LT, ...)
            for k, v in {"left": first, "right": second}.items():
                # operands may be nested query dicts or field references
                if isinstance(v, dict) and v.get("op"):
                    v = self.build(v)
                if isinstance(v, dict) and ("tablename" in v):
                    v = self.db[v["tablename"]][v["fieldname"]]
                if k == "left": left = v
                else: right = v

            if hasattr(self.db._adapter, op):
                opm = getattr(self.db._adapter, op)

            if op == "EQ": built = left == right
            elif op == "NE": built = left != right
            elif op == "GT": built = left > right
            elif op == "GE": built = left >= right
            elif op == "LT": built = left < right
            elif op == "LE": built = left <= right
            elif op in ("JOIN", "LEFT_JOIN", "RANDOM", "ALLOW_NULL"):
                built = Expression(self.db, opm)
            elif op in ("LOWER", "UPPER", "EPOCH", "PRIMARY_KEY",
                        "COALESCE_ZERO", "RAW", "INVERT"):
                built = Expression(self.db, opm, left)
            elif op in ("COUNT", "EXTRACT", "AGGREGATE", "SUBSTRING",
                        "REGEXP", "LIKE", "ILIKE", "STARTSWITH",
                        "ENDSWITH", "ADD", "SUB", "MUL", "DIV",
                        "MOD", "AS", "ON", "COMMA", "NOT_NULL",
                        "COALESCE", "CONTAINS", "BELONGS"):
                built = Expression(self.db, opm, left, right)
            # expression as string
            elif not (left or right): built = Expression(self.db, op)
            else:
                raise SyntaxError("Operator not supported: %s" % op)

        return built

    def isempty(self):
        # cheap emptiness test: select at most one record
        return not self.select(limitby=(0,1), orderby_on_limitby=False)

    def count(self,distinct=None, cache=None):
        # count records; when cache=(cache_model, time_expire) is given,
        # the result is cached keyed on the SQL text (md5-hashed if long)
        db = self.db
        if cache:
            cache_model, time_expire = cache
            sql = self._count(distinct=distinct)
            key = db._uri + '/' + sql
            if len(key)>200: key = hashlib_md5(key).hexdigest()
            return cache_model(
                key,
                (lambda self=self,distinct=distinct: \
                    db._adapter.count(self.query,distinct)),
                time_expire)
        return db._adapter.count(self.query,distinct)

    def select(self, *fields, **attributes):
        # execute the SELECT and return a Rows object
        adapter = self.db._adapter
        tablenames = adapter.tables(self.query,
                                    attributes.get('join',None),
                                    attributes.get('left',None),
                                    attributes.get('orderby',None),
                                    attributes.get('groupby',None))
        fields = adapter.expand_all(fields, tablenames)
        return adapter.select(self.query,fields,attributes)

    def nested_select(self,*fields,**attributes):
        # the SELECT as an Expression, usable as a subquery
        return Expression(self.db,self._select(*fields,**attributes))

    def delete(self):
        db = self.db
        tablename = db._adapter.get_table(self.query)
        table = db[tablename]
        # _before_delete callbacks may veto the delete by returning True
        if any(f(self) for f in table._before_delete): return 0
        ret = db._adapter.delete(tablename,self.query)
        # run _after_delete callbacks only if something was deleted
        ret and [f(self) for f in table._after_delete]
        return ret

    def update(self, **update_fields):
        db = self.db
        tablename = db._adapter.get_table(self.query)
        table = db[tablename]
        table._attempt_upload(update_fields)
        # _before_update callbacks may veto the update by returning True
        if any(f(self,update_fields) for f in table._before_update):
            return 0
        fields = table._listify(update_fields,update=True)
        if not fields:
            raise SyntaxError("No fields to update")
        ret = db._adapter.update("%s" % table,self.query,fields)
        # run _after_update callbacks only if something was updated
        ret and [f(self,update_fields) for f in table._after_update]
        return ret

    def update_naive(self, **update_fields):
        """
        same as update but does not call table._before_update and _after_update
        """
        tablename = self.db._adapter.get_table(self.query)
        table = self.db[tablename]
        fields = table._listify(update_fields,update=True)
        if not fields: raise SyntaxError("No fields to update")

        ret = self.db._adapter.update("%s" % table,self.query,fields)
        return ret

    def validate_and_update(self, **update_fields):
        # validate each field first; on success behave like update(),
        # returning a Row with .updated (count or None) and .errors
        tablename = self.db._adapter.get_table(self.query)
        response = Row()
        response.errors = Row()
        new_fields = copy.copy(update_fields)
        for key,value in update_fields.iteritems():
            value,error = self.db[tablename][key].validate(value)
            if error:
                response.errors[key] = error
            else:
                new_fields[key] = value
        table = self.db[tablename]
        if response.errors:
            response.updated = None
        else:
            if not any(f(self,new_fields) for f in table._before_update):
                fields = table._listify(new_fields,update=True)
                if not fields: raise SyntaxError("No fields to update")
                ret = self.db._adapter.update(tablename,self.query,fields)
                ret and [f(self,new_fields) for f in table._after_update]
            else:
                ret = 0
            response.updated = ret
        return response

    def delete_uploaded_files(self, upload_fields=None):
        # remove files referenced by autodelete upload fields of the
        # records in this set (skipping values being kept by an update)
        table = self.db[self.db._adapter.tables(self.query)[0]]
        # ## mind uploadfield==True means file is not in DB
        if upload_fields:
            fields = upload_fields.keys()
        else:
            fields = table.fields
        fields = [f for f in fields if table[f].type == 'upload'
                  and table[f].uploadfield == True
                  and table[f].autodelete]
        if not fields:
            return False
        for record in self.select(*[table[f] for f in fields]):
            for fieldname in fields:
                field = table[fieldname]
                oldname = record.get(fieldname, None)
                if not oldname:
                    continue
                if upload_fields and oldname == upload_fields[fieldname]:
                    # this value is being kept by the pending update
                    continue
                if field.custom_delete:
                    field.custom_delete(oldname)
                else:
                    uploadfolder = field.uploadfolder
                    if not uploadfolder:
                        uploadfolder = pjoin(
                            self.db._adapter.folder, '..', 'uploads')
                    if field.uploadseparate:
                        # stored name encodes table.field and uuid_key
                        items = oldname.split('.')
                        uploadfolder = pjoin(
                            uploadfolder,
                            "%s.%s" % (items[0], items[1]),
                            items[2][:2])
                    oldpath = pjoin(uploadfolder, oldname)
                    if exists(oldpath):
                        os.unlink(oldpath)
        return False
10157
class RecordUpdater(object):
    """Callable attached to a retrieved row (row.update_record):
    updates the underlying record and the cached column set."""

    def __init__(self, colset, table, id):
        self.colset = colset
        self.db = table._db
        self.tablename = table._tablename
        self.id = id

    def __call__(self, **fields):
        db, tablename = self.db, self.tablename
        colset = self.colset
        table = db[tablename]
        newfields = fields or dict(colset)
        # drop fields unknown to the table and the primary key itself
        for fieldname in list(newfields.keys()):
            if not fieldname in table.fields or table[fieldname].type == 'id':
                del newfields[fieldname]
        table._db(table._id == self.id,
                  ignore_common_filters=True).update(**newfields)
        colset.update(newfields)
        return colset
10173
class RecordDeleter(object):
    """Callable attached to a retrieved row (row.delete_record):
    deletes the underlying record by id."""

    def __init__(self, table, id):
        self.db = table._db
        self.tablename = table._tablename
        self.id = id

    def __call__(self):
        query = self.db[self.tablename]._id == self.id
        return self.db(query).delete()
10179
class LazyReferenceGetter(object):
    """Resolves row('other_table') into a LazySet of referencing records;
    only available when the DAL uses lazy tables."""

    def __init__(self, table, id):
        self.db = table._db
        self.tablename = table._tablename
        self.id = id

    def __call__(self, other_tablename):
        if self.db._lazy_tables is False:
            raise AttributeError()
        table = self.db[self.tablename]
        other_table = self.db[other_tablename]
        # find the field in other_table that references this table
        for rfield in table._referenced_by:
            if rfield.table == other_table:
                return LazySet(rfield, self.id)
        raise AttributeError()
10193
class LazySet(object):
    """A Set built on demand from one reference field value: every
    operation delegates to Set(db, field == id) constructed lazily."""

    def __init__(self, field, id):
        self.db = field.db
        self.tablename = field._tablename
        self.fieldname = field.name
        self.id = id

    def _getset(self):
        # build the underlying Set: tablename.fieldname == id
        query = self.db[self.tablename][self.fieldname] == self.id
        return Set(self.db, query)

    def __repr__(self):
        return repr(self._getset())

    def __call__(self, query, ignore_common_filters=False):
        return self._getset()(query, ignore_common_filters)

    def _count(self, distinct=None):
        return self._getset()._count(distinct)

    def _select(self, *fields, **attributes):
        return self._getset()._select(*fields, **attributes)

    def _delete(self):
        return self._getset()._delete()

    def _update(self, **update_fields):
        return self._getset()._update(**update_fields)

    def isempty(self):
        return self._getset().isempty()

    def count(self, distinct=None, cache=None):
        return self._getset().count(distinct, cache)

    def select(self, *fields, **attributes):
        return self._getset().select(*fields, **attributes)

    def nested_select(self, *fields, **attributes):
        return self._getset().nested_select(*fields, **attributes)

    def delete(self):
        return self._getset().delete()

    def update(self, **update_fields):
        return self._getset().update(**update_fields)

    def update_naive(self, **update_fields):
        return self._getset().update_naive(**update_fields)

    def validate_and_update(self, **update_fields):
        return self._getset().validate_and_update(**update_fields)

    def delete_uploaded_files(self, upload_fields=None):
        return self._getset().delete_uploaded_files(upload_fields)
10231
class VirtualCommand(object):
    """Callable binding a (lazy) virtual-field method to a specific row."""

    def __init__(self, method, row):
        self.method = method
        self.row = row

    def __call__(self, *args, **kwargs):
        # invoke the stored method with the bound row as first argument
        return self.method(self.row, *args, **kwargs)
10238
def lazy_virtualfield(f):
    """Decorator marking a virtual-field method as lazy (evaluated on
    access rather than at select time)."""
    setattr(f, '__lazy__', True)
    return f
10242
10243 -class Rows(object):
10244 10245 """ 10246 A wrapper for the return value of a select. It basically represents a table. 10247 It has an iterator and each row is represented as a dictionary. 10248 """ 10249 10250 # ## TODO: this class still needs some work to care for ID/OID 10251
10252 - def __init__( 10253 self, 10254 db=None, 10255 records=[], 10256 colnames=[], 10257 compact=True, 10258 rawrows=None 10259 ):
10260 self.db = db 10261 self.records = records 10262 self.colnames = colnames 10263 self.compact = compact 10264 self.response = rawrows
10265
10266 - def __repr__(self):
10267 return '<Rows (%s)>' % len(self.records)
10268
    def setvirtualfields(self,**keyed_virtualfields):
        """
        db.define_table('x',Field('number','integer'))
        if db(db.x).isempty(): [db.x.insert(number=i) for i in range(10)]

        from gluon.dal import lazy_virtualfield

        class MyVirtualFields(object):
            # normal virtual field (backward compatible, discouraged)
            def normal_shift(self): return self.x.number+1
            # lazy virtual field (because of @staticmethod)
            @lazy_virtualfield
            def lazy_shift(instance,row,delta=4): return row.x.number+delta
        db.x.virtualfields.append(MyVirtualFields())

        for row in db(db.x).select():
            print row.number, row.normal_shift, row.lazy_shift(delta=7)
        """
        if not keyed_virtualfields:
            return self
        for row in self.records:
            for (tablename,virtualfields) in keyed_virtualfields.iteritems():
                attributes = dir(virtualfields)
                # ensure the per-table box exists on this row
                if not tablename in row:
                    box = row[tablename] = Row()
                else:
                    box = row[tablename]
                updated = False
                for attribute in attributes:
                    if attribute[0] != '_':
                        method = getattr(virtualfields,attribute)
                        if hasattr(method,'__lazy__'):
                            # lazy: store a callable bound to this row
                            box[attribute]=VirtualCommand(method,row)
                        elif type(method)==types.MethodType:
                            # eager: copy the row's data onto the
                            # virtualfields instance once, then evaluate
                            if not updated:
                                virtualfields.__dict__.update(row)
                                updated = True
                            box[attribute]=method()
        return self
10308
10309 - def __and__(self,other):
10310 if self.colnames!=other.colnames: 10311 raise Exception('Cannot & incompatible Rows objects') 10312 records = self.records+other.records 10313 return Rows(self.db,records,self.colnames)
10314
10315 - def __or__(self,other):
10316 if self.colnames!=other.colnames: 10317 raise Exception('Cannot | incompatible Rows objects') 10318 records = self.records 10319 records += [record for record in other.records \ 10320 if not record in records] 10321 return Rows(self.db,records,self.colnames)
10322
10323 - def __nonzero__(self):
10324 if len(self.records): 10325 return 1 10326 return 0
10327
    def __len__(self):
        """Returns the number of rows in the result set."""
        return len(self.records)
10330
10331 - def __getslice__(self, a, b):
10332 return Rows(self.db,self.records[a:b],self.colnames,compact=self.compact)
10333
10334 - def __getitem__(self, i):
10335 row = self.records[i] 10336 keys = row.keys() 10337 if self.compact and len(keys) == 1 and keys[0] != '_extra': 10338 return row[row.keys()[0]] 10339 return row
10340
    def __iter__(self):
        """
        iterator over records
        """
        # go through self[i] (__getitem__) rather than self.records directly
        # so the `compact` single-table unwrapping applies to each yielded row
        for i in xrange(len(self)):
            yield self[i]
10348
    def __str__(self):
        """
        Serializes the rows as a CSV string (first line holds the
        column names); see export_to_csv_file.
        """
        # render into an in-memory buffer (Python 2 StringIO module)
        s = StringIO.StringIO()
        self.export_to_csv_file(s)
        return s.getvalue()
10357
10358 - def first(self):
10359 if not self.records: 10360 return None 10361 return self[0]
10362
10363 - def last(self):
10364 if not self.records: 10365 return None 10366 return self[-1]
10367
10368 - def find(self,f,limitby=None):
10369 """ 10370 returns a new Rows object, a subset of the original object, 10371 filtered by the function f 10372 """ 10373 if not self: 10374 return Rows(self.db, [], self.colnames) 10375 records = [] 10376 if limitby: 10377 a,b = limitby 10378 else: 10379 a,b = 0,len(self) 10380 k = 0 10381 for row in self: 10382 if f(row): 10383 if a<=k: records.append(row) 10384 k += 1 10385 if k==b: break 10386 return Rows(self.db, records, self.colnames)
10387
10388 - def exclude(self, f):
10389 """ 10390 removes elements from the calling Rows object, filtered by the function f, 10391 and returns a new Rows object containing the removed elements 10392 """ 10393 if not self.records: 10394 return Rows(self.db, [], self.colnames) 10395 removed = [] 10396 i=0 10397 while i<len(self): 10398 row = self[i] 10399 if f(row): 10400 removed.append(self.records[i]) 10401 del self.records[i] 10402 else: 10403 i += 1 10404 return Rows(self.db, removed, self.colnames)
10405
    def sort(self, f, reverse=False):
        """
        Returns a new Rows object with the rows ordered by the key
        function f (self is NOT sorted in place).
        """
        # build a fresh non-compact Rows and fill it with the rows ordered
        # by f; iterating self applies the compact unwrapping before sorting
        rows = Rows(self.db,[],self.colnames,compact=False)
        rows.records = sorted(self,key=f,reverse=reverse)
        return rows
10413
    def group_by_value(self, *fields, **args):
        """
        Regroups the rows into a (possibly nested) dict keyed by the values
        of the given field name(s).  With one_result=True each key maps to a
        single row (the last match); otherwise to a list of matching rows.
        """
        one_result = False
        if 'one_result' in args:
            one_result = args['one_result']

        def build_fields_struct(row, fields, num, groups):
            ''' helper function: recursively files `row` under
            groups[value-of-fields[num]][...]; returns `groups` (or, at the
            deepest level, the row / [row] leaf value)
            '''
            if num > len(fields)-1:
                # deepest level reached: the leaf is the row itself
                # (one_result) or a one-element list to be extended
                if one_result:
                    return row
                else:
                    return [row]

            key = fields[num]
            value = row[key]

            if value not in groups:
                groups[value] = build_fields_struct(row, fields, num+1, {})
            else:
                struct = build_fields_struct(row, fields, num+1, groups[ value ])

                # still have more grouping to do
                # NOTE(review): dict.update() with no arguments is a no-op;
                # presumably groups[value].update(struct) was intended, but
                # since `struct` IS groups[value] here the recursion has
                # already mutated it in place -- confirm before changing
                if type(struct) == type(dict()):
                    groups[value].update()
                # no more grouping, first only is off
                elif type(struct) == type(list()):
                    groups[value] += struct
                # no more grouping, first only on
                else:
                    groups[value] = struct

            return groups

        if len(fields) == 0:
            return self

        # if select returned no results
        if not self.records:
            return {}

        grouped_row_group = dict()

        # build the struct
        for row in self:
            build_fields_struct(row, fields, 0, grouped_row_group)

        return grouped_row_group
10465
10466 - def render(self, i=None, fields=None):
10467 """ 10468 Takes an index and returns a copy of the indexed row with values 10469 transformed via the "represent" attributes of the associated fields. 10470 10471 If no index is specified, a generator is returned for iteration 10472 over all the rows. 10473 10474 fields -- a list of fields to transform (if None, all fields with 10475 "represent" attributes will be transformed). 10476 """ 10477 10478 10479 if i is None: 10480 return (self.repr(i, fields=fields) for i in range(len(self))) 10481 import sqlhtml 10482 row = copy.deepcopy(self.records[i]) 10483 keys = row.keys() 10484 tables = [f.tablename for f in fields] if fields \ 10485 else [k for k in keys if k != '_extra'] 10486 for table in tables: 10487 repr_fields = [f.name for f in fields if f.tablename == table] \ 10488 if fields else [k for k in row[table].keys() 10489 if (hasattr(self.db[table], k) and 10490 isinstance(self.db[table][k], Field) 10491 and self.db[table][k].represent)] 10492 for field in repr_fields: 10493 row[table][field] = sqlhtml.represent( 10494 self.db[table][field], row[table][field], row[table]) 10495 if self.compact and len(keys) == 1 and keys[0] != '_extra': 10496 return row[keys[0]] 10497 return row
10498
10499 - def as_list(self, 10500 compact=True, 10501 storage_to_dict=True, 10502 datetime_to_str=False, 10503 custom_types=None):
10504 """ 10505 returns the data as a list or dictionary. 10506 :param storage_to_dict: when True returns a dict, otherwise a list(default True) 10507 :param datetime_to_str: convert datetime fields as strings (default False) 10508 """ 10509 (oc, self.compact) = (self.compact, compact) 10510 if storage_to_dict: 10511 items = [item.as_dict(datetime_to_str, custom_types) for item in self] 10512 else: 10513 items = [item for item in self] 10514 self.compact = compact 10515 return items
10516 10517
    def as_dict(self,
                key='id',
                compact=True,
                storage_to_dict=True,
                datetime_to_str=False,
                custom_types=None):
        """
        returns the data as a dictionary of dictionaries (storage_to_dict=True) or records (False)

        :param key: the name of the field to be used as dict key, normally the id;
            'table.field' syntax is supported, and a callable receives the row
            and must return the key
        :param compact: row compaction setting used while listing (default True)
        :param storage_to_dict: when True returns a dict, otherwise a list(default True)
        :param datetime_to_str: convert datetime fields as strings (default False)
        """

        # test for multiple rows
        multi = False
        f = self.first()
        if f and isinstance(key, basestring):
            # multi-table result: each value of the first row is itself a
            # row-like object of the same class
            multi = any([isinstance(v, f.__class__) for v in f.values()])
            if (not "." in key) and multi:
                # No key provided, default to int indices
                def new_key():
                    # infinite counter 0, 1, 2, ... (Python 2 generator,
                    # consumed via .next() below)
                    i = 0
                    while True:
                        yield i
                        i += 1
                key_generator = new_key()
                key = lambda r: key_generator.next()

        rows = self.as_list(compact, storage_to_dict, datetime_to_str, custom_types)
        # three keying modes: 'table.field', plain field name, or callable
        if isinstance(key,str) and key.count('.')==1:
            (table, field) = key.split('.')
            return dict([(r[table][field],r) for r in rows])
        elif isinstance(key,str):
            return dict([(r[key],r) for r in rows])
        else:
            return dict([(key(r),r) for r in rows])
    def export_to_csv_file(self, ofile, null='<NULL>', *args, **kwargs):
        """
        export data to csv, the first line contains the column names

        :param ofile: where the csv must be exported to
        :param null: how null values must be represented (default '<NULL>')
        :param delimiter: delimiter to separate values (default ',')
        :param quotechar: character to use to quote string values (default '"')
        :param quoting: quote system, use csv.QUOTE_*** (default csv.QUOTE_MINIMAL)
        :param represent: use the fields .represent value (default False)
        :param colnames: list of column names to use (default self.colnames)
            This will only work when exporting rows objects!!!!
            DO NOT use this with db.export_to_csv()
        """
        delimiter = kwargs.get('delimiter', ',')
        quotechar = kwargs.get('quotechar', '"')
        quoting = kwargs.get('quoting', csv.QUOTE_MINIMAL)
        represent = kwargs.get('represent', False)
        writer = csv.writer(ofile, delimiter=delimiter,
                            quotechar=quotechar, quoting=quoting)
        colnames = kwargs.get('colnames', self.colnames)
        write_colnames = kwargs.get('write_colnames',True)
        # a proper csv starting with the column names
        if write_colnames:
            writer.writerow(colnames)

        def none_exception(value):
            """
            returns a cleaned up value that can be used for csv export:
            - unicode text is encoded as such
            - None values are replaced with the given representation (default <NULL>)
            """
            if value is None:
                return null
            elif isinstance(value, unicode):
                return value.encode('utf8')
            elif isinstance(value,Reference):
                # references export as their integer id
                return long(value)
            elif hasattr(value, 'isoformat'):
                # date/time/datetime: 'YYYY-MM-DD HH:MM:SS'
                return value.isoformat()[:19].replace('T', ' ')
            elif isinstance(value, (list,tuple)): # for type='list:..'
                return bar_encode(value)
            return value

        for record in self:
            row = []
            for col in colnames:
                # columns not matching 'table.field' come from _extra
                # (expressions, aggregates, ...)
                if not REGEX_TABLE_DOT_FIELD.match(col):
                    row.append(record._extra[col])
                else:
                    (t, f) = col.split('.')
                    field = self.db[t][f]
                    # record may be nested per-table (joins) or flat
                    if isinstance(record.get(t, None), (Row,dict)):
                        value = record[t][f]
                    else:
                        value = record[f]
                    if field.type=='blob' and not value is None:
                        value = base64.b64encode(value)
                    elif represent and field.represent:
                        value = field.represent(value)
                    row.append(none_exception(value))
            writer.writerow(row)
10620 - def xml(self,strict=False,row_name='row',rows_name='rows'):
10621 """ 10622 serializes the table using sqlhtml.SQLTABLE (if present) 10623 """ 10624 10625 if strict: 10626 ncols = len(self.colnames) 10627 return '<%s>\n%s\n</%s>' % (rows_name, 10628 '\n'.join(row.as_xml(row_name=row_name, 10629 colnames=self.colnames) for 10630 row in self), rows_name) 10631 10632 import sqlhtml 10633 return sqlhtml.SQLTABLE(self).xml()
10634
    def as_xml(self,row_name='row',rows_name='rows'):
        """Serializes the rows as strict XML; shorthand for xml(strict=True)."""
        return self.xml(strict=True, row_name=row_name, rows_name=rows_name)
10637
    def as_json(self, mode='object', default=None):
        """
        serializes the rows to a JSON list or object with objects
        mode='object' is not implemented (should return a nested
        object structure)

        :param default: serializer fallback for non-JSON types, forwarded
            to the underlying json dumper
        """
        # serialize each row to a plain python structure first
        # (serialize=False) so the whole list can be dumped in one pass
        items = [record.as_json(mode=mode, default=default,
                                serialize=False,
                                colnames=self.colnames) for
                 record in self]

        # prefer the gluon serializers (aware of web2py types), then fall
        # back to simplejson; fail loudly if neither is available
        if have_serializers:
            return serializers.json(items,
                                    default=default or
                                    serializers.custom_json)
        elif simplejson:
            return simplejson.dumps(items)
        else:
            raise RuntimeError("missing simplejson")
    # for consistent naming yet backwards compatible
    # (as_csv reuses the CSV serialization of __str__; json is a short
    # alias of as_json)
    as_csv = __str__
    json = as_json
################################################################################
# dummy function used to define some doctests
################################################################################

def test_all():
    # This function exists only to host the doctests below; they are executed
    # by doctest.testmod() when the module is run directly (Python 2 doctests:
    # `print` statements, 01-style integer literals).
    """

    >>> if len(sys.argv)<2: db = DAL("sqlite://test.db")
    >>> if len(sys.argv)>1: db = DAL(sys.argv[1])
    >>> tmp = db.define_table('users',\
              Field('stringf', 'string', length=32, required=True),\
              Field('booleanf', 'boolean', default=False),\
              Field('passwordf', 'password', notnull=True),\
              Field('uploadf', 'upload'),\
              Field('blobf', 'blob'),\
              Field('integerf', 'integer', unique=True),\
              Field('doublef', 'double', unique=True,notnull=True),\
              Field('jsonf', 'json'),\
              Field('datef', 'date', default=datetime.date.today()),\
              Field('timef', 'time'),\
              Field('datetimef', 'datetime'),\
              migrate='test_user.table')

    Insert a field

    >>> db.users.insert(stringf='a', booleanf=True, passwordf='p', blobf='0A',\
                        uploadf=None, integerf=5, doublef=3.14,\
                        jsonf={"j": True},\
                        datef=datetime.date(2001, 1, 1),\
                        timef=datetime.time(12, 30, 15),\
                        datetimef=datetime.datetime(2002, 2, 2, 12, 30, 15))
    1

    Drop the table

    >>> db.users.drop()

    Examples of insert, select, update, delete

    >>> tmp = db.define_table('person',\
              Field('name'),\
              Field('birth','date'),\
              migrate='test_person.table')
    >>> person_id = db.person.insert(name='Marco',birth='2005-06-22')
    >>> person_id = db.person.insert(name='Massimo',birth='1971-12-21')

    commented len(db().select(db.person.ALL))
    commented 2

    >>> me = db(db.person.id==person_id).select()[0] # test select
    >>> me.name
    'Massimo'
    >>> db.person[2].name
    'Massimo'
    >>> db.person(2).name
    'Massimo'
    >>> db.person(name='Massimo').name
    'Massimo'
    >>> db.person(db.person.name=='Massimo').name
    'Massimo'
    >>> row = db.person[2]
    >>> row.name == row['name'] == row['person.name'] == row('person.name')
    True
    >>> db(db.person.name=='Massimo').update(name='massimo') # test update
    1
    >>> db(db.person.name=='Marco').select().first().delete_record() # test delete
    1

    Update a single record

    >>> me.update_record(name="Max")
    <Row {'name': 'Max', 'birth': datetime.date(1971, 12, 21), 'id': 2}>
    >>> me.name
    'Max'

    Examples of complex search conditions

    >>> len(db((db.person.name=='Max')&(db.person.birth<'2003-01-01')).select())
    1
    >>> len(db((db.person.name=='Max')&(db.person.birth<datetime.date(2003,01,01))).select())
    1
    >>> len(db((db.person.name=='Max')|(db.person.birth<'2003-01-01')).select())
    1
    >>> me = db(db.person.id==person_id).select(db.person.name)[0]
    >>> me.name
    'Max'

    Examples of search conditions using extract from date/datetime/time

    >>> len(db(db.person.birth.month()==12).select())
    1
    >>> len(db(db.person.birth.year()>1900).select())
    1

    Example of usage of NULL

    >>> len(db(db.person.birth==None).select()) ### test NULL
    0
    >>> len(db(db.person.birth!=None).select()) ### test NULL
    1

    Examples of search conditions using lower, upper, and like

    >>> len(db(db.person.name.upper()=='MAX').select())
    1
    >>> len(db(db.person.name.like('%ax')).select())
    1
    >>> len(db(db.person.name.upper().like('%AX')).select())
    1
    >>> len(db(~db.person.name.upper().like('%AX')).select())
    0

    orderby, groupby and limitby

    >>> people = db().select(db.person.name, orderby=db.person.name)
    >>> order = db.person.name|~db.person.birth
    >>> people = db().select(db.person.name, orderby=order)

    >>> people = db().select(db.person.name, orderby=db.person.name, groupby=db.person.name)

    >>> people = db().select(db.person.name, orderby=order, limitby=(0,100))

    Example of one 2 many relation

    >>> tmp = db.define_table('dog',\
              Field('name'),\
              Field('birth','date'),\
              Field('owner',db.person),\
              migrate='test_dog.table')
    >>> db.dog.insert(name='Snoopy', birth=None, owner=person_id)
    1

    A simple JOIN

    >>> len(db(db.dog.owner==db.person.id).select())
    1

    >>> len(db().select(db.person.ALL, db.dog.name,left=db.dog.on(db.dog.owner==db.person.id)))
    1

    Drop tables

    >>> db.dog.drop()
    >>> db.person.drop()

    Example of many 2 many relation and Set

    >>> tmp = db.define_table('author', Field('name'),\
              migrate='test_author.table')
    >>> tmp = db.define_table('paper', Field('title'),\
              migrate='test_paper.table')
    >>> tmp = db.define_table('authorship',\
              Field('author_id', db.author),\
              Field('paper_id', db.paper),\
              migrate='test_authorship.table')
    >>> aid = db.author.insert(name='Massimo')
    >>> pid = db.paper.insert(title='QCD')
    >>> tmp = db.authorship.insert(author_id=aid, paper_id=pid)

    Define a Set

    >>> authored_papers = db((db.author.id==db.authorship.author_id)&(db.paper.id==db.authorship.paper_id))
    >>> rows = authored_papers.select(db.author.name, db.paper.title)
    >>> for row in rows: print row.author.name, row.paper.title
    Massimo QCD

    Example of search condition using belongs

    >>> set = (1, 2, 3)
    >>> rows = db(db.paper.id.belongs(set)).select(db.paper.ALL)
    >>> print rows[0].title
    QCD

    Example of search condition using nested select

    >>> nested_select = db()._select(db.authorship.paper_id)
    >>> rows = db(db.paper.id.belongs(nested_select)).select(db.paper.ALL)
    >>> print rows[0].title
    QCD

    Example of expressions

    >>> mynumber = db.define_table('mynumber', Field('x', 'integer'))
    >>> db(mynumber).delete()
    0
    >>> for i in range(10): tmp = mynumber.insert(x=i)
    >>> db(mynumber).select(mynumber.x.sum())[0](mynumber.x.sum())
    45

    >>> db(mynumber.x+2==5).select(mynumber.x + 2)[0](mynumber.x + 2)
    5

    Output in csv

    >>> print str(authored_papers.select(db.author.name, db.paper.title)).strip()
    author.name,paper.title\r
    Massimo,QCD

    Delete all leftover tables

    >>> DAL.distributed_transaction_commit(db)

    >>> db.mynumber.drop()
    >>> db.authorship.drop()
    >>> db.author.drop()
    >>> db.paper.drop()
    """
10871 ################################################################################ 10872 # deprecated since the new DAL; here only for backward compatibility 10873 ################################################################################ 10874 10875 SQLField = Field 10876 SQLTable = Table 10877 SQLXorable = Expression 10878 SQLQuery = Query 10879 SQLSet = Set 10880 SQLRows = Rows 10881 SQLStorage = Row 10882 SQLDB = DAL 10883 GQLDB = DAL 10884 DAL.Field = Field # was necessary in gluon/globals.py session.connect 10885 DAL.Table = Table # was necessary in gluon/globals.py session.connect
################################################################################
# Geodal utils
################################################################################

def geoPoint(x,y):
    """Serializes coordinates (x, y) as a WKT POINT expression."""
    coords = "%f %f" % (x, y)
    return "POINT (%s)" % coords
10893
def geoLine(*line):
    """Serializes a sequence of (x, y) pairs as a WKT LINESTRING expression."""
    coords = ["%f %f" % point for point in line]
    return "LINESTRING (%s)" % ','.join(coords)
10896
def geoPolygon(*line):
    """Serializes a sequence of (x, y) vertices as a WKT POLYGON expression."""
    coords = ["%f %f" % point for point in line]
    return "POLYGON ((%s))" % ','.join(coords)
################################################################################
# run tests
################################################################################

if __name__ == '__main__':
    # run the doctests defined in test_all() (and in any other docstring
    # of this module) when executed directly
    import doctest
    doctest.testmod()